Commits on Source (6)
Showing 616 additions and 393 deletions
[2022-09-30 10:06:58,753] {/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py:151} INFO - IncludeKpi
INFO:monitoringservice-server:IncludeKpi
[2022-09-30 10:06:58,754] {/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py:194} INFO - getting Kpi by KpiID
INFO:monitoringservice-server:getting Kpi by KpiID
[2022-09-30 10:06:58,764] {/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py:215} ERROR - GetKpiDescriptor exception
Traceback (most recent call last):
File "/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py", line 196, in GetKpiDescriptor
kpi_db = self.sql_db.get_KPI(int(request.kpi_id.uuid))
ValueError: invalid literal for int() with base 10: 'kpi_id {\n uuid: "17"\n}\n'
ERROR:monitoringservice-server:GetKpiDescriptor exception
Traceback (most recent call last):
File "/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py", line 196, in GetKpiDescriptor
kpi_db = self.sql_db.get_KPI(int(request.kpi_id.uuid))
ValueError: invalid literal for int() with base 10: 'kpi_id {\n uuid: "17"\n}\n'
[2022-09-30 10:06:58,780] {/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py:156} WARNING - Ignoring sample with KPIId(kpi_id {
uuid: "kpi_id {\n uuid: \"17\"\n}\n"
}
): not found in database
WARNING:monitoringservice-server:Ignoring sample with KPIId(kpi_id {
uuid: "kpi_id {\n uuid: \"17\"\n}\n"
}
): not found in database
[2022-09-30 10:06:58,807] {/var/teraflow/monitoring/service/MonitoringServiceServicerImpl.py:151} INFO - IncludeKpi
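The ValueError above is the signature of a serialized protobuf leaking into a string field: calling str() on a KpiId message yields its text-format rendering ('kpi_id {\n uuid: "17"\n}\n'), which was stored as the KPI uuid and can no longer be parsed with int(). A minimal sketch of the failure and of the CopyFrom-based fix applied later in this changeset (field layout assumed from common.proto.monitoring_pb2):

# Sketch of the bug behind the traceback above; field layout assumed
# from common.proto.monitoring_pb2.
from common.proto.monitoring_pb2 import Kpi, KpiId

kpi_id = KpiId()
kpi_id.kpi_id.uuid = "17"

kpi = Kpi()
# Buggy: str() renders the whole message in protobuf text format, so the
# uuid field ends up holding 'kpi_id {\n  uuid: "17"\n}\n'.
kpi.kpi_id.kpi_id.uuid = str(kpi_id)
# Fixed: copy the inner Uuid message instead, keeping uuid as the bare "17".
kpi.kpi_id.kpi_id.CopyFrom(kpi_id.kpi_id)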
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/l3-attackmitigatorservice -c server
#!/bin/bash
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
# Define your deployment settings here
########################################################################################################################
# If not already set, set the name of the Kubernetes namespace to deploy to.
export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"}
########################################################################################################################
# Automated steps start here
########################################################################################################################
kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/l3-centralizedattackdetectorservice -c server
......@@ -15,7 +15,7 @@
import json, logging
from typing import Dict, List, Tuple
from common.proto.context_pb2 import (
ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, TopologyEvent)
ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent)
from common.tools.grpc.Tools import grpc_message_to_json_string
from context.client.EventsCollector import EventsCollector
......@@ -32,6 +32,7 @@ CLASSNAME_CONTEXT_EVENT = class_to_classname(ContextEvent)
CLASSNAME_TOPOLOGY_EVENT = class_to_classname(TopologyEvent)
CLASSNAME_DEVICE_EVENT = class_to_classname(DeviceEvent)
CLASSNAME_LINK_EVENT = class_to_classname(LinkEvent)
CLASSNAME_SLICE_EVENT = class_to_classname(SliceEvent)
CLASSNAME_SERVICE_EVENT = class_to_classname(ServiceEvent)
CLASSNAME_CONNECTION_EVENT = class_to_classname(ConnectionEvent)
......@@ -40,6 +41,7 @@ EVENT_CLASS_NAME__TO__ENTITY_ID_SELECTOR = {
CLASSNAME_TOPOLOGY_EVENT : lambda event: event.topology_id,
CLASSNAME_DEVICE_EVENT : lambda event: event.device_id,
CLASSNAME_LINK_EVENT : lambda event: event.link_id,
CLASSNAME_SLICE_EVENT : lambda event: event.slice_id,
CLASSNAME_SERVICE_EVENT : lambda event: event.service_id,
CLASSNAME_CONNECTION_EVENT: lambda event: event.connection_id,
}
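For reference, this classname-to-selector map lets the event checker pull the right entity identifier out of any collected event generically. A minimal hedged sketch of how such a map is typically applied (check_events itself is not shown in this diff, so the helper below is illustrative):

# Illustrative only: generic dispatch over the selector map above.
def select_entity_id(event):
    classname = type(event).__name__  # assumed to match class_to_classname()
    selector = EVENT_CLASS_NAME__TO__ENTITY_ID_SELECTOR.get(classname)
    if selector is None:
        raise ValueError('Unsupported event class: {:s}'.format(classname))
    return selector(event)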
......
......@@ -98,10 +98,8 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s
if circuit_id is not None: field_updates['circuit_id' ] = (circuit_id, True)
update_config_rule_custom(config_rules, endpoint_settings_key, field_updates)
field_updates = {}
if len(diversity_constraints) > 0:
field_updates.update(diversity_constraints)
update_constraint_custom(constraints, 'diversity', field_updates)
update_constraint_custom(constraints, 'diversity', diversity_constraints)
update_constraint_endpoint_location(constraints, endpoint_id, region=site_id)
if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority)
......
......@@ -34,7 +34,6 @@ class EndPointModel(Model):
device_fk = ForeignKeyField(DeviceModel)
endpoint_uuid = StringField(required=True, allow_empty=False)
endpoint_type = StringField()
resource_key = StringField(required=True, allow_empty=False)
def dump_id(self) -> Dict:
device_id = DeviceModel(self.database, self.device_fk).dump_id()
......@@ -74,13 +73,7 @@ def set_endpoint_monitors(database : Database, db_endpoint : EndPointModel, grpc
for kpi_sample_type in grpc_endpoint_kpi_sample_types:
orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, str(orm_kpi_sample_type.value)])
#db_endpoint_kpi_sample_type = EndPointMonitorModel(database, str_endpoint_kpi_sample_type_key)
#db_endpoint_kpi_sample_type.endpoint_fk = db_endpoint
#db_endpoint_kpi_sample_type.resource_key = '' # during initialization, allow empty value
#db_endpoint_kpi_sample_type.kpi_sample_type = orm_kpi_sample_type
#db_endpoint_kpi_sample_type.save()
update_or_create_object(database, EndPointMonitorModel, str_endpoint_kpi_sample_type_key, {
'endpoint_fk' : db_endpoint,
#'resource_key' : '', # during initialization, allow empty value
'kpi_sample_type': orm_kpi_sample_type,
})
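update_or_create_object collapses the five commented-out lines above into a single idempotent call. Its real implementation lives in the ORM helper module and is not part of this diff; a hedged sketch of the semantics assumed by the call above:

# Hedged sketch only: the signature is taken from the call above, the body
# is an assumption about the ORM helper's behavior.
def update_or_create_object(database, model_class, key, attributes):
    obj = model_class(database, key)       # loads the row if it already exists
    for name, value in attributes.items():
        setattr(obj, name, value)
    obj.save()                             # creates or updates the row
    return obj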
......@@ -52,7 +52,7 @@ unit test l3_centralizedattackdetector:
- if docker container ls | grep $IMAGE_NAME; then docker rm -f $IMAGE_NAME; else echo "$IMAGE_NAME image is not in the system"; fi
script:
- docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG"
- docker run --name $IMAGE_NAME -d -p 10001:10001 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- docker run --name $IMAGE_NAME -d -p 10001:10001 --env CAD_CLASSIFICATION_THRESHOLD=0.5 -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" --network=teraflowbridge $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG
- sleep 5
- docker ps -a
- docker logs $IMAGE_NAME
......
......@@ -14,22 +14,22 @@
from __future__ import print_function
from datetime import datetime
import os
import grpc
import numpy as np
import onnxruntime as rt
import logging
from common.proto.l3_centralizedattackdetector_pb2 import Empty
from common.proto.l3_centralizedattackdetector_pb2_grpc import L3CentralizedattackdetectorServicer
from common.proto.l3_attackmitigator_pb2 import L3AttackmitigatorOutput
from common.proto.l3_attackmitigator_pb2_grpc import L3AttackmitigatorStub
# KPIs and Monitoring
from common.proto.monitoring_pb2 import KpiDescriptor
from common.proto.kpi_sample_types_pb2 import KpiSampleType
# from monitoring.client.MonitoringClient import MonitoringClient
from monitoring.client.MonitoringClient import MonitoringClient
from common.proto.monitoring_pb2 import Kpi
......@@ -37,15 +37,16 @@ from common.tools.timestamp.Converters import timestamp_utcnow_to_float
from common.proto.context_pb2 import Timestamp
LOGGER = logging.getLogger(__name__)
here = os.path.dirname(os.path.abspath(__file__))
MODEL_FILE = os.path.join(here, "ml_model/crypto_5g_rf_spider_features.onnx")
current_dir = os.path.dirname(os.path.abspath(__file__))
MODEL_FILE = os.path.join(current_dir, "ml_model/crypto_5g_rf_spider_features.onnx")
classification_threshold = float(os.getenv("CAD_CLASSIFICATION_THRESHOLD", "0.5"))
class l3_centralizedattackdetectorServiceServicerImpl(L3CentralizedattackdetectorServicer):
def __init__(self):
LOGGER.debug("Creating Servicer...")
LOGGER.info("Creating Centralized Attack Detector Service")
self.inference_values = []
self.model = rt.InferenceSession(MODEL_FILE)
self.input_name = self.model.get_inputs()[0].name
......@@ -56,11 +57,9 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
self.class_probability_kpi_id = None
def create_predicted_class_kpi(self, client: MonitoringClient, service_id):
# create kpi
kpi_description: KpiDescriptor = KpiDescriptor()
kpi_description.kpi_description = "L3 security status of service {}".format(service_id)
# kpi_description.service_id.service_uuid.uuid = service_id
kpi_description.service_id.service_uuid.uuid = str(service_id)
kpi_description.service_id.service_uuid.uuid = service_id.service_uuid.uuid
kpi_description.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN
new_kpi = client.SetKpi(kpi_description)
......@@ -69,10 +68,9 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
return new_kpi
def create_class_prob_kpi(self, client: MonitoringClient, service_id):
# create kpi
kpi_description: KpiDescriptor = KpiDescriptor()
kpi_description.kpi_description = "L3 security status of service {}".format(service_id)
kpi_description.service_id.service_uuid.uuid = str(service_id)
kpi_description.service_id.service_uuid.uuid = service_id.service_uuid.uuid
kpi_description.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_UNKNOWN
new_kpi = client.SetKpi(kpi_description)
......@@ -81,7 +79,6 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
return new_kpi
def make_inference(self, request):
# ML MODEL
x_data = np.array(
[
[
......@@ -100,7 +97,8 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
)
predictions = self.model.run([self.prob_name], {self.input_name: x_data.astype(np.float32)})[0]
# Output format
# Gather the predicted class, the probability of that class and other relevant information required to block the attack
output_message = {
"confidence": None,
"timestamp": datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
......@@ -117,6 +115,7 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
"time_start": request.time_start,
"time_end": request.time_end,
}
if predictions[0][1] >= classification_threshold:
output_message["confidence"] = predictions[0][1]
output_message["tag_name"] = "Crypto"
......@@ -129,17 +128,15 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
return L3AttackmitigatorOutput(**output_message)
def SendInput(self, request, context):
# PERFORM INFERENCE WITH SENT INPUTS
logging.debug("")
print("Inferencing ...", flush=True)
# STORE VALUES
# Store the data sent in the request
self.inference_values.append(request)
# MAKE INFERENCE
# Perform inference with the data sent in the request
logging.info("Performing inference...")
output = self.make_inference(request)
logging.info("Inference performed correctly")
# Monitoring
# Include monitored KPIs values
service_id = request.service_id
if self.predicted_class_kpi_id is None:
......@@ -148,20 +145,21 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
if self.class_probability_kpi_id is None:
self.class_probability_kpi_id = self.create_class_prob_kpi(self.monitoring_client, service_id)
# Packet -> DAD -> CAD -> ML -> (2 Instantaneous Value: higher class probability, predicted class) -> Monitoring
# Packet Aggregation Features -> DAD -> CAD -> ML -> (2 Instantaneous Value: higher class probability, predicted class) -> Monitoring
# In addition, two counters:
# Counter 1: Total number of crypto attack connections
# Counter 2: Rate of crypto attack connections with respect to the total number of connections
# Predicted class KPI
kpi_class = Kpi()
kpi_class.kpi_id.kpi_id.uuid = str(self.predicted_class_kpi_id)
kpi_class.kpi_id.kpi_id.CopyFrom(self.predicted_class_kpi_id.kpi_id)
kpi_class.kpi_value.int32Val = 1 if output.tag_name == "Crypto" else 0
# Class probability KPI
kpi_prob = Kpi()
kpi_prob.kpi_id.kpi_id.uuid = str(self.class_probability_kpi_id)
kpi_prob.kpi_id.kpi_id.CopyFrom(self.class_probability_kpi_id.kpi_id)
kpi_prob.kpi_value.floatVal = output.confidence
# timestamp = timestamp_utcnow_to_float()
timestamp = Timestamp()
timestamp.timestamp = timestamp_utcnow_to_float()
......@@ -172,29 +170,37 @@ class l3_centralizedattackdetectorServiceServicerImpl(L3Centralizedattackdetecto
self.monitoring_client.IncludeKpi(kpi_prob)
if output.tag_name == "Crypto":
# SEND INFO TO MITIGATION SERVER
logging.info("Crypto attack detected")
# Notify the Attack Mitigator component about the attack
logging.info(
"Notifying the Attack Mitigator component about the attack in order to block the connection..."
)
try:
with grpc.insecure_channel("192.168.165.78:10002") as channel:
stub = L3AttackmitigatorStub(channel)
print("Sending to mitigator...", flush=True)
logging.info("Sending the connection information to the Attack Mitigator component...")
response = stub.SendOutput(output)
# print("Response received", response, "Hola", flush=True)
# print("Sent output to mitigator and received: ", response.message) #FIX No message received
logging.info("Attack Mitigator notified and received response: %s", response.message) # FIX No message received
# RETURN "OK" TO THE CALLER
return Empty(message="OK, information received and mitigator notified abou the attack")
except Exception as e:
print("This is an exception", repr(e), flush=True)
print("Couldnt find l3_attackmitigator", flush=True)
return Empty(message="Mitigator Not found")
logging.error("Error notifying the Attack Mitigator component about the attack: ", e)
logging.error("Couldn't find l3_attackmitigator")
return Empty(message="Attack Mitigator not found")
else:
print("No attack detected", flush=True)
return Empty(message="OK, information received (no attack detected)")
logging.info("No attack detected")
return Empty(message="Ok, information received (no attack detected)")
def GetOutput(self, request, context):
logging.debug("")
print("Returing inference output...")
logging.info("Returning inference output...")
k = np.multiply(self.inference_values, [2])
k = np.sum(k)
return self.make_inference(k)
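The comment block in SendInput above promises two additional counters (total crypto-attack connections and their rate over all connections) that this revision does not yet implement. A minimal hedged sketch of how they could be tracked before being reported through the same MonitoringClient; all names below are illustrative:

# Illustrative sketch: the counters described in the SendInput comments,
# not implemented in this revision of the servicer.
class AttackConnectionCounters:
    def __init__(self):
        self.total_connections  = 0
        self.crypto_connections = 0

    def update(self, tag_name: str) -> float:
        # Register one classified connection and return the attack rate.
        self.total_connections += 1
        if tag_name == "Crypto":
            self.crypto_connections += 1
        return self.crypto_connections / self.total_connections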
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sqlite3 as sl

class ManagementDB():
    # Lightweight SQLite-backed store for KPIs, subscriptions, and alarms.
    def __init__(self, database):
        self.client = sl.connect(database, check_same_thread=False)
        self.create_monitoring_table()
        self.create_subscription_table()
        self.create_alarm_table()

    def create_monitoring_table(self):
        self.client.execute("""
            CREATE TABLE IF NOT EXISTS kpi(
                kpi_id INTEGER PRIMARY KEY AUTOINCREMENT,
                kpi_description TEXT,
                kpi_sample_type INTEGER,
                device_id INTEGER,
                endpoint_id INTEGER,
                service_id INTEGER
            );
        """)

    def create_subscription_table(self):
        self.client.execute("""
            CREATE TABLE IF NOT EXISTS subscription(
                subs_id INTEGER PRIMARY KEY AUTOINCREMENT,
                kpi_id INTEGER,
                subscriber TEXT,
                sampling_duration_s REAL,
                sampling_interval_s REAL,
                start_timestamp REAL,
                end_timestamp REAL
            );
        """)

    def create_alarm_table(self):
        self.client.execute("""
            CREATE TABLE IF NOT EXISTS alarm(
                alarm_id INTEGER PRIMARY KEY AUTOINCREMENT,
                alarm_description TEXT,
                alarm_name TEXT,
                kpi_id INTEGER,
                kpi_min_value REAL,
                kpi_max_value REAL,
                in_range INTEGER,
                include_min_value INTEGER,
                include_max_value INTEGER
            );
        """)

    def insert_KPI(self, kpi_description, kpi_sample_type, device_id, endpoint_id, service_id):
        # Idempotent insert: returns the existing kpi_id if an identical KPI is registered.
        c = self.client.cursor()
        c.execute(
            "SELECT kpi_id FROM kpi WHERE device_id is ? AND kpi_sample_type is ? AND endpoint_id is ? AND service_id is ?",
            (device_id, kpi_sample_type, endpoint_id, service_id))
        data = c.fetchone()
        if data is None:
            c.execute(
                "INSERT INTO kpi (kpi_description,kpi_sample_type,device_id,endpoint_id,service_id) VALUES (?,?,?,?,?)",
                (kpi_description, kpi_sample_type, device_id, endpoint_id, service_id))
            self.client.commit()
            return c.lastrowid
        else:
            return data[0]

    def insert_subscription(self, kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp):
        # Idempotent insert: returns the existing subs_id if an identical subscription is registered.
        c = self.client.cursor()
        c.execute(
            "SELECT subs_id FROM subscription WHERE kpi_id is ? AND subscriber is ? AND sampling_duration_s is ? AND sampling_interval_s is ? AND start_timestamp is ? AND end_timestamp is ?",
            (kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp))
        data = c.fetchone()
        if data is None:
            c.execute(
                "INSERT INTO subscription (kpi_id,subscriber,sampling_duration_s,sampling_interval_s,start_timestamp,end_timestamp) VALUES (?,?,?,?,?,?)",
                (kpi_id, subscriber, sampling_duration_s, sampling_interval_s, start_timestamp, end_timestamp))
            self.client.commit()
            return c.lastrowid
        else:
            print("already exists")
            return data[0]

    def insert_alarm(self, alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value, in_range, include_min_value, include_max_value):
        # Idempotent insert: returns the existing alarm_id if an identical alarm is registered.
        c = self.client.cursor()
        c.execute(
            "SELECT alarm_id FROM alarm WHERE alarm_description is ? AND alarm_name is ? AND kpi_id is ? AND kpi_min_value is ? AND kpi_max_value is ? AND in_range is ? AND include_min_value is ? AND include_max_value is ?",
            (alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value, in_range, include_min_value, include_max_value))
        data = c.fetchone()
        if data is None:
            c.execute(
                "INSERT INTO alarm (alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value, in_range, include_min_value, include_max_value) VALUES (?,?,?,?,?,?,?,?)",
                (alarm_description, alarm_name, kpi_id, kpi_min_value, kpi_max_value, in_range, include_min_value, include_max_value))
            self.client.commit()
            return c.lastrowid
        else:
            print("already exists")
            return data[0]

    def delete_KPI(self, kpi_id):
        # Returns True when a row was deleted, False when the id is unknown.
        c = self.client.cursor()
        c.execute("SELECT * FROM kpi WHERE kpi_id is ?", (kpi_id,))
        data = c.fetchone()
        if data is None:
            return False
        else:
            c.execute("DELETE FROM kpi WHERE kpi_id is ?", (kpi_id,))
            self.client.commit()
            return True

    def delete_subscription(self, subs_id):
        c = self.client.cursor()
        c.execute("SELECT * FROM subscription WHERE subs_id is ?", (subs_id,))
        data = c.fetchone()
        if data is None:
            return False
        else:
            c.execute("DELETE FROM subscription WHERE subs_id is ?", (subs_id,))
            self.client.commit()
            return True

    def delete_alarm(self, alarm_id):
        c = self.client.cursor()
        c.execute("SELECT * FROM alarm WHERE alarm_id is ?", (alarm_id,))
        data = c.fetchone()
        if data is None:
            return False
        else:
            c.execute("DELETE FROM alarm WHERE alarm_id is ?", (alarm_id,))
            self.client.commit()
            return True

    def get_KPI(self, kpi_id):
        data = self.client.execute("SELECT * FROM kpi WHERE kpi_id is ?", (kpi_id,))
        return data.fetchone()

    def get_subscription(self, subs_id):
        data = self.client.execute("SELECT * FROM subscription WHERE subs_id is ?", (subs_id,))
        return data.fetchone()

    def get_alarm(self, alarm_id):
        data = self.client.execute("SELECT * FROM alarm WHERE alarm_id is ?", (alarm_id,))
        return data.fetchone()

    def get_KPIS(self):
        data = self.client.execute("SELECT * FROM kpi")
        return data.fetchall()

    def get_subscriptions(self):
        data = self.client.execute("SELECT * FROM subscription")
        return data.fetchall()

    def get_alarms(self):
        data = self.client.execute("SELECT * FROM alarm")
        return data.fetchall()
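A short usage sketch of ManagementDB (the database path and KPI values below are illustrative):

# Illustrative usage; 'monitoring.db' is an example path.
db = ManagementDB('monitoring.db')          # tables are created on construction
kpi_id = db.insert_KPI('Bytes received', 101, 1, 1, 1)
print(db.get_KPI(kpi_id))                   # one row, or None if the id is unknown
print(db.get_KPIS())                        # all registered KPIs
db.delete_KPI(kpi_id)                       # True when a row was removed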
......@@ -20,7 +20,7 @@ def setup_config_rules(
service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
service_settings : TreeNode, endpoint_settings : TreeNode
) -> List[Dict]:
json_settings : Dict = {} if service_settings is None else service_settings.value
json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value
......
......@@ -30,6 +30,7 @@ def setup_config_rules(
network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)
mtu = json_settings.get('mtu', 1450 ) # 1512
#address_families = json_settings.get('address_families', [] ) # ['IPV4']
bgp_as = json_settings.get('bgp_as', 0 ) # 65000
bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333
......@@ -134,4 +135,115 @@ def setup_config_rules(
}),
]
return json_config_rules
\ No newline at end of file
return json_config_rules
def teardown_config_rules(
    service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str,
    service_settings : TreeNode, endpoint_settings : TreeNode
) -> List[Dict]:

    json_settings          : Dict = {} if service_settings  is None else service_settings.value
    json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value

    #mtu              = json_settings.get('mtu',              1450 )  # 1512
    #address_families = json_settings.get('address_families', []   )  # ['IPV4']
    #bgp_as           = json_settings.get('bgp_as',           0    )  # 65000
    bgp_route_target  = json_settings.get('bgp_route_target', '0:0')  # 65000:333

    #router_id           = json_endpoint_settings.get('router_id',           '0.0.0.0')  # '10.95.0.10'
    #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0'    )  # '60001:801'
    sub_interface_index  = json_endpoint_settings.get('sub_interface_index', 0        )  # 1
    vlan_id              = json_endpoint_settings.get('vlan_id',             1        )  # 400
    #address_ip          = json_endpoint_settings.get('address_ip',          '0.0.0.0')  # '2.2.2.1'
    #address_prefix      = json_endpoint_settings.get('address_prefix',      24       )  # 30

    if_subif_name         = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
    service_short_uuid    = service_uuid.split('-')[-1]
    network_instance_name = '{:s}-NetInst'.format(service_short_uuid)
    #network_interface_desc    = '{:s}-NetIf'.format(service_uuid)
    #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid)

    json_config_rules = [
        json_config_rule_delete(
            '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
                'name': network_instance_name, 'id': if_subif_name,
            }),

        json_config_rule_delete(
            '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
                'name': endpoint_uuid, 'index': sub_interface_index,
            }),

        json_config_rule_delete(
            '/interface[{:s}]'.format(endpoint_uuid), {
                'name': endpoint_uuid,
            }),

        json_config_rule_delete(
            '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
                network_instance_name), {
                'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
                'address_family': 'IPV4',
            }),

        json_config_rule_delete(
            '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
                'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
                'address_family': 'IPV4',
            }),

        json_config_rule_delete(
            '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
                'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP',
            }),

        json_config_rule_delete(
            # pylint: disable=duplicate-string-formatting-argument
            '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
                network_instance_name, network_instance_name), {
                'name': network_instance_name,
            }),

        json_config_rule_delete(
            '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
                network_instance_name, '3'), {
                'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
            }),

        json_config_rule_delete(
            '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
                'policy_name': '{:s}_import'.format(network_instance_name),
            }),

        json_config_rule_delete(
            '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
                network_instance_name, bgp_route_target), {
                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
            }),

        json_config_rule_delete(
            '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
                'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
            }),

        json_config_rule_delete(
            # pylint: disable=duplicate-string-formatting-argument
            '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
                network_instance_name, network_instance_name), {
                'name': network_instance_name,
            }),

        json_config_rule_delete(
            '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
                network_instance_name, '3'), {
                'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
            }),

        json_config_rule_delete(
            '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
                'policy_name': '{:s}_export'.format(network_instance_name),
            }),

        json_config_rule_delete(
            '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
                network_instance_name, bgp_route_target), {
                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
                'ext_community_member'  : 'route-target:{:s}'.format(bgp_route_target),
            }),

        json_config_rule_delete(
            '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
                'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
            }),

        json_config_rule_delete(
            '/network_instance[{:s}]'.format(network_instance_name), {
                'name': network_instance_name,
            }),
    ]
    return json_config_rules
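Note the deletion order above: interface bindings, table connections, protocols, and routing policies are removed before the enclosing network instance, mirroring the setup path in reverse. A hedged sketch of pushing these rules to a device, following the pattern visible in the commented-out legacy block below (names such as device_client and json_device are assumptions taken from that block):

# Hedged sketch following the commented-out legacy pattern below.
json_device_config       = json_device.setdefault('device_config', {})
json_device_config_rules = json_device_config.setdefault('config_rules', [])
json_device_config_rules.extend(teardown_config_rules(
    service_uuid, connection_uuid, device_uuid, endpoint_uuid,
    service_settings, endpoint_settings))
device_client.ConfigureDevice(Device(**json_device))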
# json_endpoint_settings : Dict = endpoint_settings.value
# #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10'
# route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801'
# sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1
# vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400
# address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1'
# address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30
# if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id)
# db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True)
# device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
# json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True)
# json_device_config : Dict = json_device.setdefault('device_config', {})
# json_device_config_rules : List = json_device_config.setdefault('config_rules', [])
# json_device_config_rules.extend([
# json_config_rule_set(
# '/network_instance[{:s}]'.format(network_instance_name), {
# 'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF',
# 'route_distinguisher': route_distinguisher,
# #'router_id': router_id, 'address_families': address_families,
# }),
# json_config_rule_set(
# '/interface[{:s}]'.format(endpoint_uuid), {
# 'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu,
# }),
# json_config_rule_set(
# '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), {
# 'name': endpoint_uuid, 'index': sub_interface_index,
# 'description': network_subinterface_desc, 'vlan_id': vlan_id,
# 'address_ip': address_ip, 'address_prefix': address_prefix,
# }),
# json_config_rule_set(
# '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), {
# 'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid,
# 'subinterface': sub_interface_index,
# }),
# json_config_rule_set(
# '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), {
# 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as,
# }),
# json_config_rule_set(
# '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), {
# 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP',
# 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
# }),
# json_config_rule_set(
# '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format(
# network_instance_name), {
# 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP',
# 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE',
# }),
# json_config_rule_set(
# '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), {
# 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
# }),
# json_config_rule_set(
# '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format(
# network_instance_name, bgp_route_target), {
# 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
# 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target),
# }),
# json_config_rule_set(
# '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), {
# 'policy_name': '{:s}_import'.format(network_instance_name),
# }),
# json_config_rule_set(
# '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format(
# network_instance_name, '3'), {
# 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3',
# 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name),
# 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
# }),
# json_config_rule_set(
# # pylint: disable=duplicate-string-formatting-argument
# '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format(
# network_instance_name, network_instance_name), {
# 'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name),
# }),
# json_config_rule_set(
# '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), {
# 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
# }),
# json_config_rule_set(
# '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format(
# network_instance_name, bgp_route_target), {
# 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
# 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target),
# }),
# json_config_rule_set(
# '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), {
# 'policy_name': '{:s}_export'.format(network_instance_name),
# }),
# json_config_rule_set(
# '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format(
# network_instance_name, '3'), {
# 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3',
# 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name),
# 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE',
# }),
# json_config_rule_set(
# # pylint: disable=duplicate-string-formatting-argument
# '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format(
# network_instance_name, network_instance_name), {
# 'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name),
# }),
# ])
# self.__device_client.ConfigureDevice(Device(**json_device))
# results.append(True)
......@@ -59,10 +59,10 @@ def test_scenario_empty(context_client : ContextClient): # pylint: disable=rede
def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name
# ----- Start the EventsCollector ----------------------------------------------------------------------------------
events_collector = EventsCollector(context_client)
events_collector.start()
#events_collector = EventsCollector(context_client)
#events_collector.start()
expected_events = []
#expected_events = []
# ----- Create Contexts and Topologies -----------------------------------------------------------------------------
for context in CONTEXTS:
......@@ -70,7 +70,7 @@ def test_prepare_scenario(context_client : ContextClient): # pylint: disable=re
LOGGER.info('Adding Context {:s}'.format(context_uuid))
response = context_client.SetContext(Context(**context))
assert response.context_uuid.uuid == context_uuid
expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid)))
#expected_events.append(('ContextEvent', EVENT_CREATE, json_context_id(context_uuid)))
for topology in TOPOLOGIES:
context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid']
......@@ -80,13 +80,13 @@ def test_prepare_scenario(context_client : ContextClient): # pylint: disable=re
assert response.context_id.context_uuid.uuid == context_uuid
assert response.topology_uuid.uuid == topology_uuid
context_id = json_context_id(context_uuid)
expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id)))
#expected_events.append(('TopologyEvent', EVENT_CREATE, json_topology_id(topology_uuid, context_id=context_id)))
# ----- Validate Collected Events ----------------------------------------------------------------------------------
check_events(events_collector, expected_events)
#check_events(events_collector, expected_events)
# ----- Stop the EventsCollector -----------------------------------------------------------------------------------
events_collector.stop()
#events_collector.stop()
def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name
......@@ -111,10 +111,10 @@ def test_devices_bootstraping(
context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name
# ----- Start the EventsCollector ----------------------------------------------------------------------------------
events_collector = EventsCollector(context_client, log_events_received=True)
events_collector.start()
#events_collector = EventsCollector(context_client, log_events_received=True)
#events_collector.start()
expected_events = []
#expected_events = []
# ----- Create Devices and Validate Collected Events ---------------------------------------------------------------
for device, connect_rules in DEVICES:
......@@ -126,11 +126,11 @@ def test_devices_bootstraping(
response = device_client.AddDevice(Device(**device_with_connect_rules))
assert response.device_uuid.uuid == device_uuid
expected_events.extend([
# Device creation, update for automation to start the device
('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)),
#('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)),
])
#expected_events.extend([
# # Device creation, update for automation to start the device
# ('DeviceEvent', EVENT_CREATE, json_device_id(device_uuid)),
# #('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)),
#])
#response = context_client.GetDevice(response)
#for endpoint in response.device_endpoints:
......@@ -139,10 +139,10 @@ def test_devices_bootstraping(
# expected_events.append(('DeviceEvent', EVENT_UPDATE, json_device_id(device_uuid)))
# ----- Validate Collected Events ----------------------------------------------------------------------------------
check_events(events_collector, expected_events)
#check_events(events_collector, expected_events)
# ----- Stop the EventsCollector -----------------------------------------------------------------------------------
events_collector.stop()
#events_collector.stop()
def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name
......@@ -166,10 +166,10 @@ def test_devices_bootstrapped(context_client : ContextClient): # pylint: disabl
def test_links_creation(context_client : ContextClient): # pylint: disable=redefined-outer-name
# ----- Start the EventsCollector ----------------------------------------------------------------------------------
events_collector = EventsCollector(context_client)
events_collector.start()
#events_collector = EventsCollector(context_client)
#events_collector.start()
expected_events = []
#expected_events = []
# ----- Create Links and Validate Collected Events -----------------------------------------------------------------
for link in LINKS:
......@@ -177,13 +177,13 @@ def test_links_creation(context_client : ContextClient): # pylint: disable=rede
LOGGER.info('Adding Link {:s}'.format(link_uuid))
response = context_client.SetLink(Link(**link))
assert response.link_uuid.uuid == link_uuid
expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid)))
#expected_events.append(('LinkEvent', EVENT_CREATE, json_link_id(link_uuid)))
# ----- Validate Collected Events ----------------------------------------------------------------------------------
check_events(events_collector, expected_events)
#check_events(events_collector, expected_events)
# ----- Stop the EventsCollector -----------------------------------------------------------------------------------
events_collector.stop()
#events_collector.stop()
def test_links_created(context_client : ContextClient): # pylint: disable=redefined-outer-name
......
......@@ -65,10 +65,10 @@ def test_scenario_cleanup(
context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name
# ----- Start the EventsCollector ----------------------------------------------------------------------------------
events_collector = EventsCollector(context_client)
events_collector.start()
#events_collector = EventsCollector(context_client)
#events_collector.start()
expected_events = []
#expected_events = []
# ----- Delete Links and Validate Collected Events -----------------------------------------------------------------
for link in LINKS:
......@@ -76,7 +76,7 @@ def test_scenario_cleanup(
link_uuid = link_id['link_uuid']['uuid']
LOGGER.info('Deleting Link {:s}'.format(link_uuid))
context_client.RemoveLink(LinkId(**link_id))
expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid)))
#expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid)))
# ----- Delete Devices and Validate Collected Events ---------------------------------------------------------------
for device, _ in DEVICES:
......@@ -84,7 +84,7 @@ def test_scenario_cleanup(
device_uuid = device_id['device_uuid']['uuid']
LOGGER.info('Deleting Device {:s}'.format(device_uuid))
device_client.DeleteDevice(DeviceId(**device_id))
expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
#expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid)))
# ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------
for topology in TOPOLOGIES:
......@@ -94,7 +94,7 @@ def test_scenario_cleanup(
LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid))
context_client.RemoveTopology(TopologyId(**topology_id))
context_id = json_context_id(context_uuid)
expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
#expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id)))
# ----- Delete Contexts and Validate Collected Events --------------------------------------------------------------
for context in CONTEXTS:
......@@ -102,13 +102,13 @@ def test_scenario_cleanup(
context_uuid = context_id['context_uuid']['uuid']
LOGGER.info('Deleting Context {:s}'.format(context_uuid))
context_client.RemoveContext(ContextId(**context_id))
expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
#expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid)))
# ----- Validate Collected Events ----------------------------------------------------------------------------------
check_events(events_collector, expected_events)
#check_events(events_collector, expected_events)
# ----- Stop the EventsCollector -----------------------------------------------------------------------------------
events_collector.stop()
#events_collector.stop()
def test_scenario_empty_again(context_client : ContextClient): # pylint: disable=redefined-outer-name
......
......@@ -69,8 +69,8 @@ def test_scenario_is_correct(context_client : ContextClient): # pylint: disable
def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
# ----- Start the EventsCollector ----------------------------------------------------------------------------------
events_collector = EventsCollector(context_client, log_events_received=True)
events_collector.start()
#events_collector = EventsCollector(context_client, log_events_received=True)
#events_collector.start()
# ----- Create Service ---------------------------------------------------------------------------------------------
service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS)
......@@ -78,30 +78,30 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): #
# ----- Validate collected events ----------------------------------------------------------------------------------
packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
optical_service_uuid = '{:s}:optical'.format(service_uuid)
expected_events = [
# Create packet service and add first endpoint
('ServiceEvent', EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
# Configure OLS controller, create optical service, create optical connection
('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
('ServiceEvent', EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
# Configure endpoint packet devices, add second endpoint to service, create connection
('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
]
check_events(events_collector, expected_events)
#packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
#optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
#optical_service_uuid = '{:s}:optical'.format(service_uuid)
#expected_events = [
# # Create packet service and add first endpoint
# ('ServiceEvent', EVENT_CREATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
# ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
#
# # Configure OLS controller, create optical service, create optical connection
# ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
# ('ServiceEvent', EVENT_CREATE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
# ('ConnectionEvent', EVENT_CREATE, json_connection_id(optical_connection_uuid)),
#
# # Configure endpoint packet devices, add second endpoint to service, create connection
# ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
# ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
# ('ServiceEvent', EVENT_UPDATE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
# ('ConnectionEvent', EVENT_CREATE, json_connection_id(packet_connection_uuid)),
#]
#check_events(events_collector, expected_events)
# ----- Stop the EventsCollector -----------------------------------------------------------------------------------
events_collector.stop()
#events_collector.stop()
def test_scenario_service_created(context_client : ContextClient): # pylint: disable=redefined-outer-name
......
......@@ -23,7 +23,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string
from compute.tests.mock_osm.MockOSM import MockOSM
from context.client.ContextClient import ContextClient
from context.client.EventsCollector import EventsCollector
from common.proto.context_pb2 import ContextId, Empty
from common.proto.context_pb2 import ContextId, Empty, ServiceTypeEnum
from .Objects import (
CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING,
WIM_PASSWORD, WIM_USERNAME)
......@@ -77,43 +77,43 @@ def test_scenario_is_correct(context_client : ContextClient): # pylint: disable
def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name
# ----- Start the EventsCollector ----------------------------------------------------------------------------------
events_collector = EventsCollector(context_client, log_events_received=True)
events_collector.start()
#events_collector = EventsCollector(context_client, log_events_received=True)
#events_collector.start()
# ----- Delete Service ---------------------------------------------------------------------------------------------
response = context_client.ListServiceIds(ContextId(**CONTEXT_ID))
LOGGER.info('Services[{:d}] = {:s}'.format(len(response.service_ids), grpc_message_to_json_string(response)))
assert len(response.service_ids) == 2 # L3NM + TAPI
response = context_client.ListServices(ContextId(**CONTEXT_ID))
LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
assert len(response.services) == 2 # L3NM + TAPI
service_uuids = set()
for service_id in response.service_ids:
service_uuid = service_id.service_uuid.uuid
if service_uuid.endswith(':optical'): continue
for service in response.services:
if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: continue
service_uuid = service.service_id.service_uuid.uuid
service_uuids.add(service_uuid)
osm_wim.conn_info[service_uuid] = {}
assert len(service_uuids) == 1 # assume a single service has been created
assert len(service_uuids) == 1 # assume a single L3NM service has been created
service_uuid = set(service_uuids).pop()
osm_wim.delete_connectivity_service(service_uuid)
# ----- Validate collected events ----------------------------------------------------------------------------------
packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
optical_service_uuid = '{:s}:optical'.format(service_uuid)
expected_events = [
('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)),
('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
('ServiceEvent', EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)),
('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
('ServiceEvent', EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
]
check_events(events_collector, expected_events)
#packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR)
#optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS)
#optical_service_uuid = '{:s}:optical'.format(service_uuid)
#expected_events = [
# ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)),
# ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)),
# ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)),
# ('ServiceEvent', EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)),
# ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)),
# ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)),
# ('ServiceEvent', EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)),
#]
#check_events(events_collector, expected_events)
# ----- Stop the EventsCollector -----------------------------------------------------------------------------------
events_collector.stop()
#events_collector.stop()
def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name
......
......@@ -207,7 +207,19 @@
{{ connection.connection_id.connection_uuid.uuid }}
</td>
<td>
{{ connection.sub_service_ids|map(attribute='service_uuid')|map(attribute='uuid')|join(', ') }}
<ul>
{% for sub_service_id in connection.sub_service_ids %}
<li>
<a href="{{ url_for('service.detail', service_uuid=sub_service_id.service_uuid.uuid) }}">
{{ sub_service_id.service_uuid.uuid }}
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-eye" viewBox="0 0 16 16">
<path d="M16 8s-3-5.5-8-5.5S0 8 0 8s3 5.5 8 5.5S16 8 16 8zM1.173 8a13.133 13.133 0 0 1 1.66-2.043C4.12 4.668 5.88 3.5 8 3.5c2.12 0 3.879 1.168 5.168 2.457A13.133 13.133 0 0 1 14.828 8c-.058.087-.122.183-.195.288-.335.48-.83 1.12-1.465 1.755C11.879 11.332 10.119 12.5 8 12.5c-2.12 0-3.879-1.168-5.168-2.457A13.134 13.134 0 0 1 1.172 8z"/>
<path d="M8 5.5a2.5 2.5 0 1 0 0 5 2.5 2.5 0 0 0 0-5zM4.5 8a3.5 3.5 0 1 1 7 0 3.5 3.5 0 0 1-7 0z"/>
</svg>
</a>
</li>
{% endfor %}
</ul>
</td>
{% for i in range(connection.path_hops_endpoint_ids|length) %}
......
......@@ -46,11 +46,11 @@
{{ slice.slice_id.slice_uuid.uuid }}
</td>
<td>
{% for i in range(slice.slice_endpoint_ids|length) %}
<ul>
{% for i in range(slice.slice_endpoint_ids|length) %}
<li> {{ slice.slice_endpoint_ids[i].device_id.device_uuid.uuid }} / {{ slice.slice_endpoint_ids[i].endpoint_uuid.uuid }} </li>
</ul>
{% endfor %}
</ul>
</td>
<td>
{{ sse.Name(slice.slice_status.slice_status).replace('SLICESTATUS_', '') }}
......