From 8e513a8b6fcc07226d92015f555cefac5719407c Mon Sep 17 00:00:00 2001 From: hajipour Date: Mon, 21 Jul 2025 18:53:36 +0200 Subject: [PATCH 001/367] feat: basic dev of ecoc25 telemetry --- manifests/nbiservice.yaml | 12 + proto/device.proto | 11 +- proto/monitoring.proto | 18 + .../service/DeviceServiceServicerImpl.py | 30 +- src/device/service/driver_api/_Driver.py | 2 +- src/device/service/drivers/nce/driver.py | 141 +- .../service/drivers/nce/nce_fan_client.py | 103 +- src/nbi/requirements.in | 4 + src/nbi/service/NbiApplication.py | 18 + src/nbi/service/app.py | 2 + src/nbi/service/database/Engine.py | 66 + src/nbi/service/database/__init__.py | 14 + src/nbi/service/database/base.py | 10 + .../Full-Te-Topology-simap1.json | 3485 +++++++++++++++++ .../Full-Te-Topology-simap2.json | 3485 +++++++++++++++++ src/nbi/service/sse_telemetry/__init__.py | 34 + .../sse_telemetry/create_subscription.py | 132 + .../sse_telemetry/database/Subscription.py | 147 + .../sse_telemetry/database/__init__.py | 14 + .../database/models/Subscription.py | 42 + .../sse_telemetry/database/models/__init__.py | 13 + src/nbi/service/sse_telemetry/database_tmp.py | 2 + .../sse_telemetry/delete_subscription.py | 114 + src/nbi/service/sse_telemetry/topology.py | 187 + .../ecoc25-camara-e2e-telemetry/Dockerfile | 86 + .../data/camara-e2e-topology.json | 1725 ++++++++ .../deploy_specs.sh | 208 + .../mocks/Dockerfile | 30 + .../mocks/app/main.py | 299 ++ .../mocks/docker-compose.yml | 58 + .../mocks/requirements.txt | 5 + .../report_onboarding.xml | 1 + .../requirements.in | 30 + .../tests/Fixtures.py | 43 + .../tests/Tools.py | 109 + .../tests/__init__.py | 14 + .../tests/test_e2e_ietf_slice_operations.py | 478 +++ .../tests/test_onboarding.py | 67 + 38 files changed, 11136 insertions(+), 103 deletions(-) create mode 100644 src/nbi/service/database/Engine.py create mode 100644 src/nbi/service/database/__init__.py create mode 100644 src/nbi/service/database/base.py create mode 100644 src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json create mode 100644 src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json create mode 100644 src/nbi/service/sse_telemetry/__init__.py create mode 100644 src/nbi/service/sse_telemetry/create_subscription.py create mode 100644 src/nbi/service/sse_telemetry/database/Subscription.py create mode 100644 src/nbi/service/sse_telemetry/database/__init__.py create mode 100644 src/nbi/service/sse_telemetry/database/models/Subscription.py create mode 100644 src/nbi/service/sse_telemetry/database/models/__init__.py create mode 100644 src/nbi/service/sse_telemetry/database_tmp.py create mode 100644 src/nbi/service/sse_telemetry/delete_subscription.py create mode 100644 src/nbi/service/sse_telemetry/topology.py create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/Dockerfile create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json create mode 100755 src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/requirements.in create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py create mode 100644 
src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py create mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index 27026cc0f..cac267495 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -44,6 +44,18 @@ spec: value: "production" # normal value is "production", change to "development" if developing - name: IETF_NETWORK_RENDERER value: "LIBYANG" + - name: NBI_DATABASE + value: "tfs_nbi" + - name: CRDB_NAMESPACE + value: "crdb" + - name: CRDB_SQL_PORT + value: "26257" + - name: CRDB_USERNAME + value: "tfs" + - name: CRDB_PASSWORD + value: "tfs123" + - name: CRDB_SSLMODE + value: "require" envFrom: - secretRef: name: kfk-kpi-data diff --git a/proto/device.proto b/proto/device.proto index a4233d33d..265c96191 100644 --- a/proto/device.proto +++ b/proto/device.proto @@ -19,11 +19,12 @@ import "context.proto"; import "monitoring.proto"; // to be migrated to: "kpi_manager.proto" service DeviceService { - rpc AddDevice (context.Device ) returns (context.DeviceId ) {} - rpc ConfigureDevice (context.Device ) returns (context.DeviceId ) {} - rpc DeleteDevice (context.DeviceId ) returns (context.Empty ) {} - rpc GetInitialConfig(context.DeviceId ) returns (context.DeviceConfig) {} - rpc MonitorDeviceKpi(MonitoringSettings) returns (context.Empty ) {} + rpc AddDevice (context.Device ) returns (context.DeviceId ) {} + rpc ConfigureDevice (context.Device ) returns (context.DeviceId ) {} + rpc DeleteDevice (context.DeviceId ) returns (context.Empty ) {} + rpc GetInitialConfig (context.DeviceId ) returns (context.DeviceConfig ) {} + rpc MonitorDeviceKpi (MonitoringSettings ) returns (context.Empty ) {} + rpc SSETelemetrySubscribe(monitoring.SSEMonitoringSubscriptionConfig) returns (monitoring.SSEMonitoringSubscriptionResponse ) {} } message MonitoringSettings { diff --git a/proto/monitoring.proto b/proto/monitoring.proto index d027b792b..11dac60bf 100644 --- a/proto/monitoring.proto +++ b/proto/monitoring.proto @@ -172,3 +172,21 @@ message AlarmResponse { message AlarmList { repeated AlarmDescriptor alarm_descriptor = 1; } + +message SSEMonitoringSubscriptionConfig { + enum ConfigType { + Subscribe = 0; + Unsubscribe = 1; + GetTelemetry = 2; + } + context.DeviceId device_id = 1; + ConfigType config_type = 2; + string uri = 3; + string sampling_interval = 4; // in seconds + string identifier = 5; +} + +message SSEMonitoringSubscriptionResponse { + string identifier = 1; + string uri = 2; +} diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index d5cb2f61f..2ea57c7cc 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -25,6 +25,7 @@ from common.proto.context_pb2 import ( ) from common.proto.device_pb2 import MonitoringSettings from common.proto.device_pb2_grpc import DeviceServiceServicer +from common.proto.monitoring_pb2 import SSEMonitoringSubscriptionConfig, SSEMonitoringSubscriptionResponse from common.tools.context_queries.Device import get_device from common.tools.mutex_queues.MutexQueues import MutexQueues from context.client.ContextClient import ContextClient @@ -108,7 +109,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): # (which controller is in charge of which sub-device). 
new_sub_devices : Dict[str, Device] = dict() new_sub_links : Dict[str, Link] = dict() - + #----- Experimental ------------ new_optical_configs : Dict[str, OpticalConfig] = dict() @@ -400,3 +401,30 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): return Empty() finally: self.mutex_queues.signal_done(device_uuid) + + def SSETelemetrySubscribe(self, request: SSEMonitoringSubscriptionConfig, context : grpc.ServicerContext) -> SSEMonitoringSubscriptionResponse: + device_id = request.device_id.device_uuid.uuid + config_type = request.config_type + context_client = ContextClient() + device = get_device( + context_client, device_id, rw_copy=True, include_endpoints=False, include_components=False, + include_config_rules=True) + if device is None: + raise NotFoundException('Device', device_id, extra_details='loading in SSETelemetrySubscribe') + driver : _Driver = get_driver(self.driver_instance_cache, device) + if config_type == SSEMonitoringSubscriptionConfig.Subscribe: + r = driver.SubscribeState([(request.uri, 0, float(request.sampling_interval))]) + if len(r) != 1: + raise OperationFailedException( + 'SSETelemetrySubscribe', extra_details='Driver returned an unexpected number of responses: {:d}'.format(len(r)) + ) + sub_conf: dict = r[0] + return SSEMonitoringSubscriptionResponse(identifier=sub_conf['identifier'], uri=sub_conf['uri']) + if config_type == SSEMonitoringSubscriptionConfig.Unsubscribe: + r = driver.UnsubscribeState([(request.identifier, 0, 0)]) + if len(r) != 1: + raise OperationFailedException( + 'SSETelemetrySubscribe', extra_details='Driver returned an unexpected number of responses: {:d}'.format(len(r)) + ) + return SSEMonitoringSubscriptionResponse() + diff --git a/src/device/service/driver_api/_Driver.py b/src/device/service/driver_api/_Driver.py index e8540b872..1b080bca6 100644 --- a/src/device/service/driver_api/_Driver.py +++ b/src/device/service/driver_api/_Driver.py @@ -139,7 +139,7 @@ class _Driver: raise NotImplementedError() def SubscribeState(self, subscriptions: List[Tuple[str, float, float]]) -> \ - List[Union[bool, Exception]]: + List[Union[bool, dict[str, Any], Exception]]: """ Subscribe to state information of entire device or selected resources. Subscriptions are incremental. Driver should keep track of requested resources.
diff --git a/src/device/service/drivers/nce/driver.py b/src/device/service/drivers/nce/driver.py index 2792f9e22..cabe17991 100644 --- a/src/device/service/drivers/nce/driver.py +++ b/src/device/service/drivers/nce/driver.py @@ -37,17 +37,21 @@ from device.service.driver_api.ImportTopologyEnum import ( ) from .Constants import SPECIAL_RESOURCE_MAPPINGS -from .nce_fan_client import NCEClient +from .nce_fan_client import ( + NCEClient, + SubscribedNotificationsSchema, + UnsubscribedNotificationsSchema, +) from .Tools import compose_resource_endpoint LOGGER = logging.getLogger(__name__) -RE_NCE_APP_FLOW_DATA = re.compile(r"^\/service\[[^\]]+\]\/AppFlow$") -RE_NCE_APP_FLOW_OPERATION = re.compile(r"^\/service\[[^\]]+\]\/AppFlow\/operation$") +RE_NCE_APP_FLOW_DATA = re.compile(r'^\/service\[[^\]]+\]\/AppFlow$') +RE_NCE_APP_FLOW_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/AppFlow\/operation$') -DRIVER_NAME = "nce" -METRICS_POOL = MetricsPool("Device", "Driver", labels={"driver": DRIVER_NAME}) +DRIVER_NAME = 'nce' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class NCEDriver(_Driver): @@ -56,10 +60,10 @@ class NCEDriver(_Driver): self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - self.__running = TreeNode(".") - scheme = self.settings.get("scheme", "http") - username = self.settings.get("username") - password = self.settings.get("password") + self.__running = TreeNode('.') + scheme = self.settings.get('scheme', 'http') + username = self.settings.get('username') + password = self.settings.get('password') self.nce = NCEClient( self.address, self.port, @@ -73,14 +77,12 @@ class NCEDriver(_Driver): # if username is not None and password is not None # else None # ) - self.__tfs_nbi_root = "{:s}://{:s}:{:d}".format( - scheme, self.address, int(self.port) - ) - self.__timeout = int(self.settings.get("timeout", 120)) + self.__tfs_nbi_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) + self.__timeout = int(self.settings.get('timeout', 120)) self.__import_topology = get_import_topology( self.settings, default=ImportTopologyEnum.DEVICES ) - endpoints = self.settings.get("endpoints", []) + endpoints = self.settings.get('endpoints', []) endpoint_resources = [] for endpoint in endpoints: endpoint_resource = compose_resource_endpoint(endpoint) @@ -89,26 +91,24 @@ class NCEDriver(_Driver): endpoint_resources.append(endpoint_resource) self._set_initial_config(endpoint_resources) - def _set_initial_config( - self, resources: List[Tuple[str, Any]] - ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) + def _set_initial_config(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) if len(resources) == 0: return [] results = [] - resolver = anytree.Resolver(pathattr="name") + resolver = anytree.Resolver(pathattr='name') with self.__lock: for i, resource in enumerate(resources): - str_resource_name = "resources[#{:d}]".format(i) + str_resource_name = 'resources[#{:d}]'.format(i) try: chk_type(str_resource_name, resource, (list, tuple)) chk_length(str_resource_name, resource, min_length=2, max_length=2) resource_key, resource_value = resource chk_string(str_resource_name, resource_key, allow_empty=False) - resource_path = resource_key.split("/") + resource_path = resource_key.split('/') except Exception as e: # pylint: disable=broad-except LOGGER.exception( - "Exception validating {:s}: {:s}".format( + 'Exception validating 
{:s}: {:s}'.format( str_resource_name, str(resource_key) ) ) @@ -120,9 +120,7 @@ class NCEDriver(_Driver): except: # pylint: disable=bare-except pass - set_subnode_value( - resolver, self.__running, resource_path, resource_value - ) + set_subnode_value(resolver, self.__running, resource_path, resource_value) results.append(True) return results @@ -155,34 +153,28 @@ class NCEDriver(_Driver): def GetConfig( self, resource_keys: List[str] = [] ) -> List[Tuple[str, Union[Any, None, Exception]]]: - chk_type("resources", resource_keys, list) + chk_type('resources', resource_keys, list) with self.__lock: if len(resource_keys) == 0: return dump_subtree(self.__running) results = [] - resolver = anytree.Resolver(pathattr="name") + resolver = anytree.Resolver(pathattr='name') for i, resource_key in enumerate(resource_keys): - str_resource_name = "resource_key[#{:d}]".format(i) + str_resource_name = 'resource_key[#{:d}]'.format(i) try: chk_string(str_resource_name, resource_key, allow_empty=False) - resource_key = SPECIAL_RESOURCE_MAPPINGS.get( - resource_key, resource_key - ) - resource_path = resource_key.split("/") + resource_key = SPECIAL_RESOURCE_MAPPINGS.get(resource_key, resource_key) + resource_path = resource_key.split('/') except Exception as e: # pylint: disable=broad-except LOGGER.exception( - "Exception validating {:s}: {:s}".format( + 'Exception validating {:s}: {:s}'.format( str_resource_name, str(resource_key) ) ) - results.append( - (resource_key, e) - ) # if validation fails, store the exception + results.append((resource_key, e)) # if validation fails, store the exception continue - resource_node = get_subnode( - resolver, self.__running, resource_path, default=None - ) + resource_node = get_subnode(resolver, self.__running, resource_path, default=None) # if not found, resource_node is None if resource_node is None: continue @@ -191,9 +183,7 @@ class NCEDriver(_Driver): return results @metered_subclass_method(METRICS_POOL) - def SetConfig( - self, resources: List[Tuple[str, Any]] - ) -> List[Union[bool, Exception]]: + def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: @@ -202,57 +192,49 @@ class NCEDriver(_Driver): with self.__lock: for resource in resources: resource_key, resource_value = resource - LOGGER.debug("resource = {:s}".format(str(resource))) + LOGGER.debug('resource = {:s}'.format(str(resource))) if RE_NCE_APP_FLOW_OPERATION.match(resource_key): - operation_type = json.loads(resource_value)["type"] + operation_type = json.loads(resource_value)['type'] results.append((resource_key, True)) break else: - raise Exception("operation type not found in resources") + raise Exception('operation type not found in resources') for resource in resources: - LOGGER.info("resource = {:s}".format(str(resource))) + LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_NCE_APP_FLOW_DATA.match(resource_key): continue try: resource_value = json.loads(resource_value) - if operation_type == "create": - + if operation_type == 'create': self.nce.create_app_flow(resource_value) - elif operation_type == "delete": - - app_flow_name = resource_value["huawei-nce-app-flow:app-flows"][ - "app-flow" - ][0]["app-name"] + elif operation_type == 'delete': + app_flow_name = resource_value['huawei-nce-app-flow:app-flows']['app-flow'][ + 0 + ]['app-name'] self.nce.delete_app_flow(app_flow_name) results.append((resource_key, True)) except Exception as e: # pylint: disable=broad-except 
LOGGER.exception( - "Unhandled error processing resource_key({:s})".format( - str(resource_key) - ) + 'Unhandled error processing resource_key({:s})'.format(str(resource_key)) ) results.append((resource_key, e)) return results @metered_subclass_method(METRICS_POOL) - def DeleteConfig( - self, resources: List[Tuple[str, Any]] - ) -> List[Union[bool, Exception]]: + def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: return results with self.__lock: for resource in resources: - LOGGER.info("resource = {:s}".format(str(resource))) + LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource try: results.append((resource_key, True)) except Exception as e: # pylint: disable=broad-except LOGGER.exception( - "Unhandled error processing resource_key({:s})".format( - str(resource_key) - ) + 'Unhandled error processing resource_key({:s})'.format(str(resource_key)) ) results.append((resource_key, e)) return results @@ -260,19 +242,40 @@ class NCEDriver(_Driver): @metered_subclass_method(METRICS_POOL) def SubscribeState( self, subscriptions: List[Tuple[str, float, float]] - ) -> List[Union[bool, Exception]]: - # TODO: IETF L3VPN does not support monitoring by now - return [False for _ in subscriptions] + ) -> List[Union[bool, dict[str, Any], Exception]]: + if len(subscriptions) != 1: + raise ValueError('NCE driver supports only one subscription at a time') + s = subscriptions[0] + uri = s[0] + _ = s[1] # sampling duration + sampling_interval = s[2] + s_data: SubscribedNotificationsSchema = { + 'ietf-subscribed-notifications:input': { + 'datastore': 'operational', + 'ietf-yang-push:datastore-xpath-filter': uri, + 'ietf-yang-push:periodic': {'ietf-yang-push:period': str(sampling_interval)}, + } + } + s_id = self.nce.subscribe_telemetry(s_data) + return [s_id] @metered_subclass_method(METRICS_POOL) def UnsubscribeState( self, subscriptions: List[Tuple[str, float, float]] ) -> List[Union[bool, Exception]]: - # TODO: IETF L3VPN does not support monitoring by now - return [False for _ in subscriptions] + if len(subscriptions) != 1: + raise ValueError('NCE driver supports only one subscription at a time') + s = subscriptions[0] + identifier = s[0] + s_data: UnsubscribedNotificationsSchema = { + 'delete-subscription': { + 'identifier': identifier, + } + } + self.nce.unsubscribe_telemetry(s_data) + return [True] def GetState( self, blocking=False, terminate: Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: - # TODO: IETF L3VPN does not support monitoring by now return [] diff --git a/src/device/service/drivers/nce/nce_fan_client.py b/src/device/service/drivers/nce/nce_fan_client.py index 9805f9cf8..91e2001e3 100644 --- a/src/device/service/drivers/nce/nce_fan_client.py +++ b/src/device/service/drivers/nce/nce_fan_client.py @@ -14,13 +14,42 @@ import logging from typing import Optional +from typing_extensions import List, TypedDict import requests from requests.auth import HTTPBasicAuth + +Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) + +Input = TypedDict( + 'Input', + { + 'datastore': str, + 'ietf-yang-push:datastore-xpath-filter': str, + 'ietf-yang-push:periodic': Periodic, + }, +) + +SubscribedNotificationsSchema = TypedDict( + 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} +) + +SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) + +UnsubscribedNotificationsSchema = TypedDict( + 
'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} +) + + +class SubscriptionId(TypedDict): + identifier: str + uri: str + + LOGGER = logging.getLogger(__name__) -NCE_FAN_URL = "{:s}://{:s}:{:d}/restconf/v1/data" +NCE_FAN_URL = '{:s}://{:s}:{:d}' TIMEOUT = 30 HTTP_OK_CODES = { @@ -31,34 +60,35 @@ HTTP_OK_CODES = { } MAPPING_STATUS = { - "DEVICEOPERATIONALSTATUS_UNDEFINED": 0, - "DEVICEOPERATIONALSTATUS_DISABLED": 1, - "DEVICEOPERATIONALSTATUS_ENABLED": 2, + 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, + 'DEVICEOPERATIONALSTATUS_DISABLED': 1, + 'DEVICEOPERATIONALSTATUS_ENABLED': 2, } MAPPING_DRIVER = { - "DEVICEDRIVER_UNDEFINED": 0, - "DEVICEDRIVER_OPENCONFIG": 1, - "DEVICEDRIVER_TRANSPORT_API": 2, - "DEVICEDRIVER_P4": 3, - "DEVICEDRIVER_IETF_NETWORK_TOPOLOGY": 4, - "DEVICEDRIVER_ONF_TR_532": 5, - "DEVICEDRIVER_XR": 6, - "DEVICEDRIVER_IETF_L2VPN": 7, - "DEVICEDRIVER_GNMI_OPENCONFIG": 8, - "DEVICEDRIVER_OPTICAL_TFS": 9, - "DEVICEDRIVER_IETF_ACTN": 10, - "DEVICEDRIVER_OC": 11, + 'DEVICEDRIVER_UNDEFINED': 0, + 'DEVICEDRIVER_OPENCONFIG': 1, + 'DEVICEDRIVER_TRANSPORT_API': 2, + 'DEVICEDRIVER_P4': 3, + 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, + 'DEVICEDRIVER_ONF_TR_532': 5, + 'DEVICEDRIVER_XR': 6, + 'DEVICEDRIVER_IETF_L2VPN': 7, + 'DEVICEDRIVER_GNMI_OPENCONFIG': 8, + 'DEVICEDRIVER_OPTICAL_TFS': 9, + 'DEVICEDRIVER_IETF_ACTN': 10, + 'DEVICEDRIVER_OC': 11, } HEADERS = {'Content-Type': 'application/json'} + class NCEClient: def __init__( self, address: str, port: int, - scheme: str = "http", + scheme: str = 'http', username: Optional[str] = None, password: Optional[str] = None, ) -> None: @@ -67,28 +97,47 @@ class NCEClient: def create_app_flow(self, app_flow_data: dict) -> None: try: - app_data = app_flow_data["huawei-nce-app-flow:app-flows"]["applications"] - app_url = self._nce_fan_url + "/app-flows/apps" + app_data = app_flow_data['huawei-nce-app-flow:app-flows']['applications'] + app_url = self._nce_fan_url + '/restconf/v1/data' + '/app-flows/apps' LOGGER.info(f'Creating app: {app_data} URL: {app_url}') requests.post(app_url, json=app_data, headers=HEADERS) - app_flow_data = { - "app-flow": app_flow_data["huawei-nce-app-flow:app-flows"]["app-flow"] - } - app_flow_url = self._nce_fan_url + "/app-flows" + app_flow_data = {'app-flow': app_flow_data['huawei-nce-app-flow:app-flows']['app-flow']} + app_flow_url = self._nce_fan_url + '/restconf/v1/data' + '/app-flows' LOGGER.info(f'Creating app flow: {app_flow_data} URL: {app_flow_url}') requests.post(app_flow_url, json=app_flow_data, headers=HEADERS) except requests.exceptions.ConnectionError: - raise Exception("faild to send post requests to NCE FAN") + raise Exception('failed to send post requests to NCE FAN') def delete_app_flow(self, app_flow_name: str) -> None: try: - app_url = self._nce_fan_url + f"/app-flows/apps/application={app_flow_name}" + app_url = ( + self._nce_fan_url + + '/restconf/v1/data' + + f'/app-flows/apps/application={app_flow_name}' + ) LOGGER.info(f'Deleting app: {app_flow_name} URL: {app_url}') requests.delete(app_url) - app_flow_url = self._nce_fan_url + f"/app-flows/app-flow={app_flow_name}" + app_flow_url = ( + self._nce_fan_url + '/restconf/v1/data' + f'/app-flows/app-flow={app_flow_name}' + ) LOGGER.info(f'Deleting app flow: {app_flow_name} URL: {app_flow_url}') requests.delete(app_flow_url) except requests.exceptions.ConnectionError: - raise Exception("faild to send delete request to NCE FAN") + raise Exception('failed to send delete request to NCE FAN') + + def subscribe_telemetry( + self,
subscription_data: SubscribedNotificationsSchema + ) -> SubscriptionId: + url = self._nce_fan_url + '/restconf/operations/subscriptions:establish-subscription' + LOGGER.debug(f'Subscribing to telemetry with data: {subscription_data} URL: {url}') + r = requests.post(url, json=subscription_data, headers=HEADERS) + r.raise_for_status() + return r.json() + + def unsubscribe_telemetry(self, unsubscription_data: UnsubscribedNotificationsSchema) -> None: + url = self._nce_fan_url + '/restconf/operations/subscriptions:delete-subscription' + LOGGER.debug(f'Unsubscribing from telemetry with data: {unsubscription_data} URL: {url}') + r = requests.post(url, json=unsubscription_data, headers=HEADERS) + r.raise_for_status() diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in index 6c176e3f0..72ca62b1e 100644 --- a/src/nbi/requirements.in +++ b/src/nbi/requirements.in @@ -35,3 +35,7 @@ requests==2.27.* werkzeug==2.3.7 #websockets==12.0 websocket-client==1.8.0 # used by socketio to upgrate to websocket +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index ad02c754c..8d9e7a879 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -20,6 +20,8 @@ from flask_restful import Api, Resource from flask_socketio import Namespace, SocketIO from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from nbi.Config import SECRET_KEY +from nbi.service.database.base import rebuild_database +from .database.Engine import Engine LOGGER = logging.getLogger(__name__) @@ -54,6 +56,22 @@ class NbiApplication: logger=True, engineio_logger=True ) + # Initialize the SQLAlchemy database engine + LOGGER.info('Getting SQLAlchemy DB Engine...') + self._db_engine = Engine.get_engine() + if self._db_engine is None: + LOGGER.error('Unable to get SQLAlchemy DB Engine. Exiting...') + raise Exception('Unable to get SQLAlchemy DB Engine') + + # Try creating the database or log any issues + try: + Engine.create_database(self._db_engine) + except Exception as e: # More specific exception handling + LOGGER.exception(f'Failed to check/create the database: {self._db_engine.url}.
Error: {str(e)}') + raise e + + rebuild_database(self._db_engine) + def add_rest_api_resource(self, resource_class : Resource, *urls, **kwargs) -> None: self._api.add_resource(resource_class, *urls, **kwargs) diff --git a/src/nbi/service/app.py b/src/nbi/service/app.py index 2d6102a34..2ad81b1f3 100644 --- a/src/nbi/service/app.py +++ b/src/nbi/service/app.py @@ -43,6 +43,7 @@ from .restconf_root import register_restconf_root from .tfs_api import register_tfs_api #from .topology_updates import register_topology_updates from .vntm_recommend import register_vntm_recommend +from .sse_telemetry import register_telemetry_subscription from .well_known_meta import register_well_known @@ -97,6 +98,7 @@ register_ietf_acl (nbi_app) register_qkd_app (nbi_app) #register_topology_updates(nbi_app) # does not work; check if eventlet-grpc side effects register_vntm_recommend (nbi_app) +register_telemetry_subscription(nbi_app) register_camara_qod (nbi_app) LOGGER.info('All connectors registered') diff --git a/src/nbi/service/database/Engine.py b/src/nbi/service/database/Engine.py new file mode 100644 index 000000000..57f4b4db5 --- /dev/null +++ b/src/nbi/service/database/Engine.py @@ -0,0 +1,66 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, sqlalchemy, sqlalchemy_utils +from typing import Optional +from common.Settings import get_setting + +LOGGER = logging.getLogger(__name__) + +APP_NAME = 'tfs' +ECHO = False # true: dump SQL commands and transactions executed +CRDB_URI_TEMPLATE = ( + 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' +) + + +class Engine: + @staticmethod + def get_engine() -> Optional[sqlalchemy.engine.Engine]: + crdb_uri = get_setting('CRDB_URI', default=None) + if crdb_uri is None: + CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') + CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') + CRDB_DATABASE = get_setting('NBI_DATABASE') + CRDB_USERNAME = get_setting('CRDB_USERNAME') + CRDB_PASSWORD = get_setting('CRDB_PASSWORD') + CRDB_SSLMODE = get_setting('CRDB_SSLMODE') + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, + CRDB_PASSWORD, + CRDB_NAMESPACE, + CRDB_SQL_PORT, + CRDB_DATABASE, + CRDB_SSLMODE, + ) + + try: + engine = sqlalchemy.create_engine( + crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True + ) + except: # pylint: disable=bare-except # pragma: no cover + LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) + return None + + return engine + + @staticmethod + def create_database(engine: sqlalchemy.engine.Engine) -> None: + if not sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.create_database(engine.url) + + @staticmethod + def drop_database(engine: sqlalchemy.engine.Engine) -> None: + if sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.drop_database(engine.url) diff --git a/src/nbi/service/database/__init__.py b/src/nbi/service/database/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/nbi/service/database/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/nbi/service/database/base.py b/src/nbi/service/database/base.py new file mode 100644 index 000000000..b12f8f20c --- /dev/null +++ b/src/nbi/service/database/base.py @@ -0,0 +1,10 @@ +import sqlalchemy +from sqlalchemy.orm import declarative_base + +_Base = declarative_base() + + +def rebuild_database(db_engine: sqlalchemy.engine.Engine, drop_if_exists: bool = False): + if drop_if_exists: + _Base.metadata.drop_all(db_engine) + _Base.metadata.create_all(db_engine) diff --git a/src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json b/src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json new file mode 100644 index 000000000..351ec13f7 --- /dev/null +++ b/src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json @@ -0,0 +1,3485 @@ +{ + "ietf-network:networks": { + "network": [ + { + "network-id": "providerId-10-clientId-0-topologyId-1", + "ietf-te-topology:te": { + "name": "Huawei-Network" + }, + "ietf-te-topology:te-topology-identifier": { + "provider-id": 10, + "client-id": 0, + "topology-id": "1" + }, + "network-types": { + "ietf-te-topology:te-topology": { + "ietf-otn-topology:otn-topology": {} + } + }, + "node": [ + { + "node-id": "172.16.182.25", + "ietf-te-topology:te-node-id": "172.16.182.25", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OA" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + }, + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + } + ] + } + }, + { + "node-id": "172.16.185.31", + "ietf-te-topology:te-node-id": "172.16.185.31", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + 
"name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + }, + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + } + ] + } + }, + { + "node-id": "172.16.185.33", + "ietf-te-topology:te-node-id": "172.16.185.33", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": 
"ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + }, + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + } + ] + } + }, + { + "node-id": "172.16.185.32", + "ietf-te-topology:te-node-id": "172.16.185.32", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OE" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + }, + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + } + ] + } + } + ], + "ietf-network-topology:link": [ + { + "link-id": "172.16.182.25-501", + "source": { + "source-node": "172.16.182.25", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-500", + 
"source": { + "source-node": "172.16.182.25", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-501", + "source": { + "source-node": "172.16.185.31", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-500", + "source": { + "source-node": "172.16.185.31", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-500", + "source": { + "source-node": "172.16.185.33", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-501", + "source": { + "source-node": "172.16.185.33", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": 
"ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-500", + "source": { + "source-node": "172.16.185.32", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-501", + "source": { + "source-node": "172.16.185.32", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + } + ] + }, + { + "network-id": "providerId-10-clientId-0-topologyId-2", + "ietf-te-topology:te": { + "name": "Huawei-Network" + }, + "ietf-te-topology:te-topology-identifier": { + "provider-id": 10, + "client-id": 0, + "topology-id": "2" + }, + "network-types": { + "ietf-te-topology:te-topology": { + "ietf-eth-te-topology:eth-tran-topology": {} + } + }, + "node": [ + { + "node-id": "172.1.201.22", + "ietf-te-topology:te-node-id": "172.1.201.22", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "VM2" + } + } + }, + { + "node-id": "172.1.101.22", + "ietf-te-topology:te-node-id": "172.1.101.22", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", 
+ "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "VM1" + } + } + }, + { + "node-id": "172.16.204.221", + "ietf-te-topology:te-node-id": "172.16.204.221", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": "172.10.33.1", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + }, + { + "tp-id": "201", + "ietf-te-topology:te-tp-id": 201, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "POP2" + } + } + }, + { + "node-id": "172.16.204.220", + "ietf-te-topology:te-node-id": "172.16.204.220", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": "172.10.33.2", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + 
"supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + }, + { + "tp-id": "201", + "ietf-te-topology:te-tp-id": 201, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "POP1", + "connectivity-matrices": { + "label-restrictions": { + "label-restriction": [ + { + "index": 1, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 101 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 101 + } + } + }, + { + "index": 2, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 201 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 201 + } + } + } + ] + } + } + } + } + }, + { + "node-id": "172.16.122.25", + "ietf-te-topology:te-node-id": "172.16.122.25", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "128.32.44.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + 
"switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "PE" + } + } + }, + { + "node-id": "172.16.125.31", + "ietf-te-topology:te-node-id": "172.16.125.31", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.125.33", + "ietf-te-topology:te-node-id": "172.16.125.33", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + 
"ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.125.32", + "ietf-te-topology:te-node-id": "172.16.125.32", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "172.10.44.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "PE" + } + } + }, + { + "node-id": "172.16.182.25", + "ietf-te-topology:te-node-id": "172.16.182.25", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "128.32.33.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + 
"vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OA" + } + } + }, + { + "node-id": "172.16.185.31", + "ietf-te-topology:te-node-id": "172.16.185.31", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + 
"ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.185.33", + "ietf-te-topology:te-node-id": "172.16.185.33", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.185.32", + "ietf-te-topology:te-node-id": "172.16.185.32", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "172.10.33.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 
501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OE" + } + } + }, + { + "node-id": "172.16.58.10", + "ietf-te-topology:te-node-id": "172.16.58.10", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": "128.32.44.2", + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": "128.32.33.2", + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + 
"client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + }, + { + "tp-id": "201", + "ietf-te-topology:te-tp-id": 201, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OLT", + "connectivity-matrices": { + "label-restrictions": { + "label-restriction": [ + { + "index": 1, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 21 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 21 + } + } + }, + { + "index": 2, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 31 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 31 + } + } + } + ] + } + } + } + } + }, + { + "node-id": "172.16.61.10", + "ietf-te-topology:te-node-id": "172.16.61.10", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "ONT1" + } + } + }, + { + "node-id": "172.16.61.11", + "ietf-te-topology:te-node-id": "172.16.61.11", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "ONT2" + } + } + } + ], + "ietf-network-topology:link": [ + { + "link-id": 
"172.16.185.32-200", + "source": { + "source-node": "172.16.185.32", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.204.220", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.32-200", + "source": { + "source-node": "172.16.125.32", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.204.221", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.32-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.220-500", + "source": { + "source-node": "172.16.204.220", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.220-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.221-500", + "source": { + "source-node": "172.16.204.221", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.32", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.221-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.221-200", + "source": { + "source-node": "172.16.204.221", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.1.101.22", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.221-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.220-200", + "source": { + "source-node": "172.16.204.220", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.1.201.22", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.220-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + 
"ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.1.101.22-500", + "source": { + "source-node": "172.1.101.22", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.204.221", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.1.101.22-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.1.201.22-500", + "source": { + "source-node": "172.1.201.22", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.204.220", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.1.201.22-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-200", + "source": { + "source-node": "172.16.182.25", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.122.25-200", + "source": { + "source-node": "172.16.122.25", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.122.25-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-500", + "source": { + "source-node": "172.16.58.10", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-501", + "source": { + "source-node": "172.16.58.10", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.122.25", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + 
"priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-501", + "source": { + "source-node": "172.16.182.25", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-500", + "source": { + "source-node": "172.16.182.25", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-501", + "source": { + "source-node": "172.16.185.31", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-500", + "source": { + "source-node": "172.16.185.31", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-501", + "source": { + "source-node": "172.16.185.33", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-500", + "source": { + "source-node": "172.16.185.33", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 
10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-501", + "source": { + "source-node": "172.16.185.32", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-500", + "source": { + "source-node": "172.16.185.32", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.122.25-501", + "source": { + "source-node": "172.16.122.25", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.125.31", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.122.25-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.122.25-500", + "source": { + "source-node": "172.16.122.25", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.33", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.122.25-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.31-501", + "source": { + "source-node": "172.16.125.31", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.122.25", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.31-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.31-500", + "source": { + "source-node": "172.16.125.31", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.32", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.31-500", + "max-link-bandwidth": { + 
"te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.33-501", + "source": { + "source-node": "172.16.125.33", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.125.32", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.33-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.33-500", + "source": { + "source-node": "172.16.125.33", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.122.25", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.33-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.32-501", + "source": { + "source-node": "172.16.125.32", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.125.33", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.32-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.32-500", + "source": { + "source-node": "172.16.125.32", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.31", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.32-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-200", + "source": { + "source-node": "172.16.58.10", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.61.10", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.61.10-500", + "source": { + "source-node": "172.16.61.10", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": 
"172.16.61.10-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-201", + "source": { + "source-node": "172.16.58.10", + "source-tp": "201" + }, + "destination": { + "dest-node": "172.16.61.11", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-201", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.61.11-500", + "source": { + "source-node": "172.16.61.11", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "201" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.61.11-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + } + ] + }, + { + "network-id": "e2e-slice-simap-1", + "supporting-network": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2" + }, + { + "network-ref": "aggnet-simap-1" + } + ], + "node": [ + { + "node-id": "node-1", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.61.10" + } + ] + }, + { + "node-id": "node-2", + "supporting-node": [ + { + "network-ref": "aggnet-simap-1", + "node-ref": "node-2" + } + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "link-1", + "source": { + "source-node": "node-1", + "source-tp": "tp-1" + }, + "destination": { + "dest-node": "node-2", + "dest-tp": "tp-2" + }, + "simap-telemetry": { + "bandwidth-utilization": 76.51, + "latency": 2.321, + "related-service-ids": ["e2e-network-slice-1"] + }, + "ietf-network-topology:supporting-link": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.61.10-500" + }, + { + "network-ref": "aggnet-simap-1", + "link-ref": "link-1" + } + ] + } + ] + }, + { + "network-id": "aggnet-simap-1", + "supporting-network": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2" + }, + { + "network-ref": "trans-simap-1" + } + ], + "node": [ + { + "node-id": "node-1", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.58.10" + } + ] + }, + { + "node-id": "node-2", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.204.221" + } + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "link-1", + "source": { + "source-node": "node-1", + "source-tp": "tp-1" + }, + "destination": { + "dest-node": "node-2", + "dest-tp": "tp-2" + }, + "simap-telemetry": { + "bandwidth-utilization": 76.51, + "latency": 2.321, + "related-service-ids": ["trans-network-slice-1"] + }, + "ietf-network-topology:supporting-link": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.58.10-501" + }, + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": 
"172.16.204.221-500" + }, + { + "network-ref": "trans-simap-1", + "link-ref": "link-1" + } + ] + } + ] + }, + { + "network-id": "trans-simap-1", + "supporting-network": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2" + } + ], + "node": [ + { + "node-id": "node-1", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.122.25" + } + ] + }, + { + "node-id": "node-2", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.125.32" + } + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "link-1", + "source": { + "source-node": "node-1", + "source-tp": "tp-1" + }, + "destination": { + "dest-node": "node-2", + "dest-tp": "tp-2" + }, + "simap-telemetry": { + "bandwidth-utilization": 76.51, + "latency": 2.321, + "related-service-ids": ["l3sm-instance-1"] + }, + "ietf-network-topology:supporting-link": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.122.25-500" + }, + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.125.33-501" + } + ] + } + ] + } + ] + } +} diff --git a/src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json b/src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json new file mode 100644 index 000000000..f9019c481 --- /dev/null +++ b/src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json @@ -0,0 +1,3485 @@ +{ + "ietf-network:networks": { + "network": [ + { + "network-id": "providerId-10-clientId-0-topologyId-1", + "ietf-te-topology:te": { + "name": "Huawei-Network" + }, + "ietf-te-topology:te-topology-identifier": { + "provider-id": 10, + "client-id": 0, + "topology-id": "1" + }, + "network-types": { + "ietf-te-topology:te-topology": { + "ietf-otn-topology:otn-topology": {} + } + }, + "node": [ + { + "node-id": "172.16.182.25", + "ietf-te-topology:te-node-id": "172.16.182.25", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OA" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": 
true, + "link-tp-ref": "501" + } + ] + } + }, + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + } + ] + } + }, + { + "node-id": "172.16.185.31", + "ietf-te-topology:te-node-id": "172.16.185.31", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + }, + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + } + ] + } + }, + { + "node-id": "172.16.185.33", + "ietf-te-topology:te-node-id": "172.16.185.33", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": 
[ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + }, + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + } + ] + } + }, + { + "node-id": "172.16.185.32", + "ietf-te-topology:te-node-id": "172.16.185.32", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-te-topology:te": { + "name": "1-1-1-1-1", + "admin-status": "up", + "oper-status": "up", + "ietf-otn-topology:client-svc": { + "client-facing": false + }, + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-oduk", + "switching-capability": "ietf-te-types:switching-otn", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:otn": { + "odu-type": "ietf-layer1-types:ODU4" + } + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OE" + }, + "tunnel-termination-point": [ + { + "tunnel-tp-id": "NTAw", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "500" + } + ] + } + }, + { + "tunnel-tp-id": "NTAx", + "admin-status": "up", + "oper-status": "up", + "encoding": "ietf-te-types:lsp-encoding-oduk", + "name": "1-1-1-1-1", + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "switching-capability": "ietf-te-types:switching-otn", + "local-link-connectivities": { + "local-link-connectivity": [ + { + "is-allowed": true, + "link-tp-ref": "501" + } + ] + } + } + ] + } + } + ], + "ietf-network-topology:link": [ + { + "link-id": "172.16.182.25-501", + "source": { + "source-node": "172.16.182.25", + 
"source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-500", + "source": { + "source-node": "172.16.182.25", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-501", + "source": { + "source-node": "172.16.185.31", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-500", + "source": { + "source-node": "172.16.185.31", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-500", + "source": { + "source-node": "172.16.185.33", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + 
"link-id": "172.16.185.33-501", + "source": { + "source-node": "172.16.185.33", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-500", + "source": { + "source-node": "172.16.185.32", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-500", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-501", + "source": { + "source-node": "172.16.185.32", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-501", + "te-delay-metric": 1, + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "odu-type": "ietf-layer1-types:ODU0", + "number": 80 + } + ] + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-otn-topology:odulist": [ + { + "number": 80, + "odu-type": "ietf-layer1-types:ODU0" + } + ] + } + } + ] + } + } + } + ] + }, + { + "network-id": "providerId-10-clientId-0-topologyId-2", + "ietf-te-topology:te": { + "name": "Huawei-Network" + }, + "ietf-te-topology:te-topology-identifier": { + "provider-id": 10, + "client-id": 0, + "topology-id": "2" + }, + "network-types": { + "ietf-te-topology:te-topology": { + "ietf-eth-te-topology:eth-tran-topology": {} + } + }, + "node": [ + { + "node-id": "172.1.201.22", + "ietf-te-topology:te-node-id": "172.1.201.22", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + 
"admin-status": "up", + "name": "VM2" + } + } + }, + { + "node-id": "172.1.101.22", + "ietf-te-topology:te-node-id": "172.1.101.22", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "VM1" + } + } + }, + { + "node-id": "172.16.204.221", + "ietf-te-topology:te-node-id": "172.16.204.221", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": "172.10.33.1", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + }, + { + "tp-id": "201", + "ietf-te-topology:te-tp-id": 201, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "POP2" + } + } + }, + { + "node-id": "172.16.204.220", + "ietf-te-topology:te-node-id": "172.16.204.220", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": "172.10.33.2", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" 
+ } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + }, + { + "tp-id": "201", + "ietf-te-topology:te-tp-id": 201, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "POP1", + "connectivity-matrices": { + "label-restrictions": { + "label-restriction": [ + { + "index": 1, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 101 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 101 + } + } + }, + { + "index": 2, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 201 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 201 + } + } + } + ] + } + } + } + } + }, + { + "node-id": "172.16.122.25", + "ietf-te-topology:te-node-id": "172.16.122.25", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "128.32.44.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } 
+ } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "PE" + } + } + }, + { + "node-id": "172.16.125.31", + "ietf-te-topology:te-node-id": "172.16.125.31", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.125.33", + "ietf-te-topology:te-node-id": "172.16.125.33", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": 
"ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.125.32", + "ietf-te-topology:te-node-id": "172.16.125.32", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "172.10.44.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + 
"switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "PE" + } + } + }, + { + "node-id": "172.16.182.25", + "ietf-te-topology:te-node-id": "172.16.182.25", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "128.32.33.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OA" + } + } + }, + { + "node-id": "172.16.185.31", + "ietf-te-topology:te-node-id": "172.16.185.31", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } 
+ }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.185.33", + "ietf-te-topology:te-node-id": "172.16.185.33", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "P" + } + } + }, + { + "node-id": "172.16.185.32", + "ietf-te-topology:te-node-id": "172.16.185.32", + "ietf-network-topology:termination-point": [ + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": "172.10.33.254", + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + 
"vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": 501, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OE" + } + } + }, + { + "node-id": "172.16.58.10", + "ietf-te-topology:te-node-id": "172.16.58.10", + "ietf-network-topology:termination-point": [ + { + "tp-id": "501", + "ietf-te-topology:te-tp-id": "128.32.44.2", + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": "128.32.33.2", + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + 
"supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + }, + "ietf-te-topology:te": { + "name": "endpoint:111", + "admin-status": "up", + "oper-status": "up", + "interface-switching-capability": [ + { + "encoding": "ietf-te-types:lsp-encoding-ethernet", + "switching-capability": "ietf-te-types:switching-l2sc", + "max-lsp-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + ] + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + }, + { + "tp-id": "201", + "ietf-te-topology:te-tp-id": 201, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "OLT", + "connectivity-matrices": { + "label-restrictions": { + "label-restriction": [ + { + "index": 1, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 21 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 21 + } + } + }, + { + "index": 2, + "label-start": { + "te-label": { + "ietf-eth-te-topology:vlanid": 31 + } + }, + "label-end": { + "te-label": { + "ietf-eth-te-topology:vlanid": 31 + } + } + } + ] + } + } + } + } + }, + { + "node-id": "172.16.61.10", + "ietf-te-topology:te-node-id": "172.16.61.10", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "ONT1" + } + } + }, + { + "node-id": "172.16.61.11", + "ietf-te-topology:te-node-id": "172.16.61.11", + "ietf-network-topology:termination-point": [ + { + "tp-id": "500", + "ietf-te-topology:te-tp-id": 500, + "ietf-eth-te-topology:eth-svc": { + "client-facing": false, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + 
"vlan-bundling": false, + "vlan-range": "1-4094" + } + } + } + } + }, + { + "tp-id": "200", + "ietf-te-topology:te-tp-id": 200, + "ietf-eth-te-topology:eth-svc": { + "client-facing": true, + "supported-classification": { + "port-classification": true, + "vlan-classification": { + "outer-tag": { + "supported-tag-types": [ + "ietf-eth-tran-types:classify-c-vlan", + "ietf-eth-tran-types:classify-s-vlan" + ], + "vlan-bundling": false, + "vlan-range": "1-4096" + } + } + } + } + } + ], + "ietf-te-topology:te": { + "oper-status": "up", + "te-node-attributes": { + "admin-status": "up", + "name": "ONT2" + } + } + } + ], + "ietf-network-topology:link": [ + { + "link-id": "172.16.185.32-200", + "source": { + "source-node": "172.16.185.32", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.204.220", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.32-200", + "source": { + "source-node": "172.16.125.32", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.204.221", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.32-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.220-500", + "source": { + "source-node": "172.16.204.220", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.220-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.221-500", + "source": { + "source-node": "172.16.204.221", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.32", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.221-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.221-200", + "source": { + "source-node": "172.16.204.221", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.1.101.22", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.221-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + 
"priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.204.220-200", + "source": { + "source-node": "172.16.204.220", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.1.201.22", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.204.220-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.1.101.22-500", + "source": { + "source-node": "172.1.101.22", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.204.221", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.1.101.22-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.1.201.22-500", + "source": { + "source-node": "172.1.201.22", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.204.220", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.1.201.22-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-200", + "source": { + "source-node": "172.16.182.25", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.122.25-200", + "source": { + "source-node": "172.16.122.25", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.122.25-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-500", + "source": { + "source-node": "172.16.58.10", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } 
+ }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-501", + "source": { + "source-node": "172.16.58.10", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.122.25", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-501", + "source": { + "source-node": "172.16.182.25", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.182.25-500", + "source": { + "source-node": "172.16.182.25", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.182.25-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-501", + "source": { + "source-node": "172.16.185.31", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.31-500", + "source": { + "source-node": "172.16.185.31", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.31-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-501", + "source": { + "source-node": "172.16.185.33", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.32", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-501", + "max-link-bandwidth": { + "te-bandwidth": { + 
"ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.33-500", + "source": { + "source-node": "172.16.185.33", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.182.25", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.33-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-501", + "source": { + "source-node": "172.16.185.32", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.185.33", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.185.32-500", + "source": { + "source-node": "172.16.185.32", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.185.31", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.185.32-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.122.25-501", + "source": { + "source-node": "172.16.122.25", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.125.31", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.122.25-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.122.25-500", + "source": { + "source-node": "172.16.122.25", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.33", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.122.25-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.31-501", + "source": { + "source-node": "172.16.125.31", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.122.25", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.31-501", + 
"max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.31-500", + "source": { + "source-node": "172.16.125.31", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.32", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.31-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.33-501", + "source": { + "source-node": "172.16.125.33", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.125.32", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.33-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.33-500", + "source": { + "source-node": "172.16.125.33", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.122.25", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.33-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.32-501", + "source": { + "source-node": "172.16.125.32", + "source-tp": "501" + }, + "destination": { + "dest-node": "172.16.125.33", + "dest-tp": "501" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.32-501", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.125.32-500", + "source": { + "source-node": "172.16.125.32", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.125.31", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.125.32-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-200", + "source": { + "source-node": "172.16.58.10", + "source-tp": "200" + }, + "destination": { + "dest-node": "172.16.61.10", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + 
"admin-status": "up", + "name": "172.16.58.10-200", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.61.10-500", + "source": { + "source-node": "172.16.61.10", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "200" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.61.10-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.58.10-201", + "source": { + "source-node": "172.16.58.10", + "source-tp": "201" + }, + "destination": { + "dest-node": "172.16.61.11", + "dest-tp": "500" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.58.10-201", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + }, + { + "link-id": "172.16.61.11-500", + "source": { + "source-node": "172.16.61.11", + "source-tp": "500" + }, + "destination": { + "dest-node": "172.16.58.10", + "dest-tp": "201" + }, + "ietf-te-topology:te": { + "oper-status": "up", + "te-link-attributes": { + "access-type": "point-to-point", + "admin-status": "up", + "name": "172.16.61.11-500", + "max-link-bandwidth": { + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + }, + "unreserved-bandwidth": [ + { + "priority": 7, + "te-bandwidth": { + "ietf-eth-te-topology:eth-bandwidth": 10000000 + } + } + ] + } + } + } + ] + }, + { + "network-id": "e2e-slice-simap-2", + "supporting-network": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2" + }, + { + "network-ref": "aggnet-simap-2" + } + ], + "node": [ + { + "node-id": "node-1", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.61.10" + } + ] + }, + { + "node-id": "node-2", + "supporting-node": [ + { + "network-ref": "aggnet-simap-2", + "node-ref": "node-2" + } + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "link-1", + "source": { + "source-node": "node-1", + "source-tp": "tp-1" + }, + "destination": { + "dest-node": "node-2", + "dest-tp": "tp-2" + }, + "simap-telemetry": { + "bandwidth-utilization": 76.51, + "latency": 2.321, + "related-service-ids": ["e2e-network-slice-1"] + }, + "ietf-network-topology:supporting-link": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.61.10-500" + }, + { + "network-ref": "aggnet-simap-2", + "link-ref": "link-1" + } + ] + } + ] + }, + { + "network-id": "aggnet-simap-2", + "supporting-network": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2" + }, + { + "network-ref": "trans-simap-2" + } + ], + "node": [ + { + "node-id": "node-1", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.58.10" + } + ] + }, + { + "node-id": "node-2", + "supporting-node": [ + { + "network-ref": 
"providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.204.220" + } + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "link-1", + "source": { + "source-node": "node-1", + "source-tp": "tp-1" + }, + "destination": { + "dest-node": "node-2", + "dest-tp": "tp-2" + }, + "simap-telemetry": { + "bandwidth-utilization": 76.51, + "latency": 2.321, + "related-service-ids": ["trans-network-slice-1"] + }, + "ietf-network-topology:supporting-link": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.58.10-500" + }, + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.204.220-500" + }, + { + "network-ref": "trans-simap-1", + "link-ref": "link-1" + } + ] + } + ] + }, + { + "network-id": "trans-simap-2", + "supporting-network": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2" + } + ], + "node": [ + { + "node-id": "node-1", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.182.25" + } + ] + }, + { + "node-id": "node-2", + "supporting-node": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "node-ref": "172.16.185.32" + } + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "link-1", + "source": { + "source-node": "node-1", + "source-tp": "tp-1" + }, + "destination": { + "dest-node": "node-2", + "dest-tp": "tp-2" + }, + "simap-telemetry": { + "bandwidth-utilization": 76.51, + "latency": 2.321, + "related-service-ids": ["l3sm-instance-1"] + }, + "ietf-network-topology:supporting-link": [ + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.182.25-500" + }, + { + "network-ref": "providerId-10-clientId-0-topologyId-2", + "link-ref": "172.16.185.33-501" + } + ] + } + ] + } + ] + } +} \ No newline at end of file diff --git a/src/nbi/service/sse_telemetry/__init__.py b/src/nbi/service/sse_telemetry/__init__.py new file mode 100644 index 000000000..a27686b95 --- /dev/null +++ b/src/nbi/service/sse_telemetry/__init__.py @@ -0,0 +1,34 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# RFC 8639 - Subscription to YANG Notifications
+# Ref: https://datatracker.ietf.org/doc/rfc8639
+# RFC 8641 - Subscription to YANG Notifications for Datastore Updates (YANG Push)
+# Ref: https://datatracker.ietf.org/doc/rfc8641
+
+
+from nbi.service.NbiApplication import NbiApplication
+from .create_subscription import CreateSubscription
+from .delete_subscription import DeleteSubscription
+
+
+def register_telemetry_subscription(nbi_app: NbiApplication):
+    nbi_app.add_rest_api_resource(
+        CreateSubscription,
+        '/restconf/operations/subscriptions:establish-subscription',
+        '/restconf/operations/subscriptions:establish-subscription/',
+    )
+    nbi_app.add_rest_api_resource(
+        DeleteSubscription,
+        '/restconf/operations/subscriptions:delete-subscription',
+        '/restconf/operations/subscriptions:delete-subscription/',
+    )
diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py
new file mode 100644
index 000000000..b7f3b6505
--- /dev/null
+++ b/src/nbi/service/sse_telemetry/create_subscription.py
@@ -0,0 +1,132 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json, logging
+from random import choice
+from typing import Dict, List, Optional, Set
+from uuid import uuid4
+from typing_extensions import TypedDict
+from flask import jsonify, request
+from flask_restful import Resource
+from werkzeug.exceptions import BadRequest, NotFound, UnsupportedMediaType, InternalServerError
+from common.tools.context_queries.Device import get_device
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.proto.monitoring_pb2 import (
+    SSEMonitoringSubscriptionConfig,
+    SSEMonitoringSubscriptionResponse,
+)
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from nbi.service._tools.Authentication import HTTP_AUTH
+from nbi.service.database.Engine import Engine
+from nbi.service.sse_telemetry.database.Subscription import (
+    SSESubsciprionDict,
+    list_identifiers,
+    set_subscription,
+)
+from .topology import (
+    Controllers,
+    SubscribedNotificationsSchema,
+    decompose_subscription,
+    get_controller_name,
+)
+
+from .database_tmp import SERVICE_ID
+
+
+class SubscriptionId(TypedDict):
+    identifier: str
+    uri: str
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+class CreateSubscription(Resource):
+    # @HTTP_AUTH.login_required
+    def post(self):
+        db = Engine.get_engine()
+        if db is None:
+            LOGGER.error('Database engine is not initialized')
+            raise InternalServerError('Database engine is not initialized')
+        if not request.is_json:
+            LOGGER.error('JSON payload is required')
+            raise UnsupportedMediaType('JSON payload is required')
+        request_data: Optional[SubscribedNotificationsSchema] = request.json
+        if request_data is None:
+            LOGGER.error('JSON payload is required')
+            raise UnsupportedMediaType('JSON payload is required')
+        LOGGER.debug('Received subscription request data: {:s}'.format(str(request_data)))
+
+        # 
break the request into its abstract components for telemetry subscription + request_identifier = str( + choice([x for x in range(1000, 10000) if x not in list_identifiers(db)]) + ) + sub_subs = decompose_subscription(request_data, SERVICE_ID) + + # subscribe to each component + device_client = DeviceClient() + context_client = ContextClient() + for s in sub_subs: + xpath = s['ietf-subscribed-notifications:input'][ + 'ietf-yang-push:datastore-xpath-filter' + ] + + device_controller = get_controller_name(xpath, SERVICE_ID, context_client) + if device_controller == Controllers.CONTROLLERLESS: + LOGGER.warning( + 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) + ) + continue + s_req = SSEMonitoringSubscriptionConfig() + s_req.device_id.device_uuid.uuid = device_controller.value + s_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe + s_req.uri = xpath + s_req.sampling_interval = s['ietf-subscribed-notifications:input'][ + 'ietf-yang-push:periodic' + ]['ietf-yang-push:period'] + r: SSEMonitoringSubscriptionResponse = device_client.SSETelemetrySubscribe(s_req) + s = SSESubsciprionDict( + uuid=str(uuid4()), + identifier=r.identifier, + uri=r.uri, + xpath=xpath, + status=True, + main_subscription=False, + main_subscription_id=request_identifier, + details=None, + ) + _ = set_subscription(db, s) + + # save the main subscription to the database + r_uri = f'/restconf/data/subscriptions/{request_identifier}' + s = SSESubsciprionDict( + uuid=str(uuid4()), + identifier=request_identifier, + uri=r_uri, + xpath=request_data['ietf-subscribed-notifications:input'][ + 'ietf-yang-push:datastore-xpath-filter' + ], + status=True, + main_subscription=True, + main_subscription_id=None, + details=None, + ) + _ = set_subscription(db, s) + + # Return the subscription ID + sub_id = SubscriptionId(identifier=request_identifier, uri=r_uri) + return jsonify(sub_id) diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py new file mode 100644 index 000000000..79abe4871 --- /dev/null +++ b/src/nbi/service/sse_telemetry/database/Subscription.py @@ -0,0 +1,147 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
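+
+# Persistence helpers for SSE telemetry subscriptions. All queries go through
+# sqlalchemy_cockroachdb's run_transaction() against the NBI database. Rows with
+# main_subscription=True represent the client-facing RESTCONF subscription, while
+# rows with main_subscription=False are the per-controller sub-subscriptions tied
+# to it through main_subscription_id.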
+ +import logging +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Any, List, Optional, TypedDict + +from .models.Subscription import SSESubscriptionModel + +LOGGER = logging.getLogger(__name__) + + +class SSESubsciprionDict(TypedDict): + uuid: str + identifier: str + uri: str + xpath: str + status: bool + main_subscription: bool + main_subscription_id: Optional[str] + details: Optional[dict[str, Any]] + + +def set_subscription(db_engine: Engine, request: SSESubsciprionDict) -> None: + def callback(session: Session) -> bool: + stmt = insert(SSESubscriptionModel).values([request]) + stmt = stmt.on_conflict_do_update( + index_elements=[SSESubscriptionModel.id], + set_=dict( + uuid=stmt.excluded.uuid, + identifier=stmt.excluded.identifier, + uri=stmt.excluded.uri, + xpath=stmt.excluded.xpath, + status=stmt.excluded.status, + main_subscription=stmt.excluded.main_subscription, + main_subscription_id=stmt.excluded.main_subscription_id, + details=stmt.excluded.details, + ), + ) + stmt = stmt.returning(SSESubscriptionModel) + subs = session.execute(stmt).fetchall() + return subs[0] + + _ = run_transaction(sessionmaker(bind=db_engine), callback) + + +def delete_subscription(db_engine: Engine, request: str, main_subscription: bool) -> None: + def callback(session: Session) -> bool: + num_deleted = ( + session.query(SSESubscriptionModel) + .filter_by(identifier=request, main_subscription=main_subscription) + .delete() + ) + return num_deleted > 0 + + _ = run_transaction(sessionmaker(bind=db_engine), callback) + + +def get_main_subscription(db_engine: Engine, request: str) -> Optional[SSESubsciprionDict]: + def callback(session: Session) -> Optional[SSESubsciprionDict]: + obj: Optional[SSESubscriptionModel] = ( + session.query(SSESubscriptionModel) + .filter_by(identifier=request, main_subscription=True) + .one_or_none() + ) + return ( + None + if obj is None + else SSESubsciprionDict( + uuid=obj.uuid, + identifier=obj.identifier, + uri=obj.uri, + xpath=obj.xpath, + status=obj.status, + main_subscription=obj.main_subscription, + main_subscription_id=obj.main_subscription_id, + details=obj.details, + ) + ) + + return run_transaction(sessionmaker(bind=db_engine), callback) + + +def get_sub_subscription(db_engine: Engine, request: str) -> List[SSESubsciprionDict]: + def callback(session: Session) -> List[SSESubsciprionDict]: + obj: List[SSESubscriptionModel] = ( + session.query(SSESubscriptionModel) + .filter_by(main_subscription_id=request, main_subscription=False) + .all() + ) + return [ + SSESubsciprionDict( + uuid=o.uuid, + identifier=o.identifier, + uri=o.uri, + xpath=o.xpath, + status=o.status, + main_subscription=o.main_subscription, + main_subscription_id=o.main_subscription_id, + details=o.details, + ) + for o in obj + ] + + return run_transaction(sessionmaker(bind=db_engine), callback) + + +def get_subscriptions(db_engine: Engine) -> List[SSESubsciprionDict]: + def callback(session: Session) -> List[SSESubsciprionDict]: + obj_list: List[SSESubscriptionModel] = session.query(SSESubscriptionModel).all() + return [ + SSESubsciprionDict( + uuid=obj.uuid, + identifier=obj.identifier, + uri=obj.uri, + xpath=obj.xpath, + status=obj.status, + main_subscription=obj.main_subscription, + main_subscription_id=obj.main_subscription_id, + details=obj.details, + ) + for obj in obj_list + ] + + return run_transaction(sessionmaker(bind=db_engine), 
+
+
+def list_identifiers(db_engine: Engine) -> set[str]:
+    def callback(session: Session) -> set[str]:
+        obj_list: List[SSESubscriptionModel] = session.query(SSESubscriptionModel).all()
+        return {obj.identifier for obj in obj_list}
+
+    return run_transaction(sessionmaker(bind=db_engine), callback)
diff --git a/src/nbi/service/sse_telemetry/database/__init__.py b/src/nbi/service/sse_telemetry/database/__init__.py
new file mode 100644
index 000000000..3ccc21c7d
--- /dev/null
+++ b/src/nbi/service/sse_telemetry/database/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/src/nbi/service/sse_telemetry/database/models/Subscription.py b/src/nbi/service/sse_telemetry/database/models/Subscription.py
new file mode 100644
index 000000000..53fa57e81
--- /dev/null
+++ b/src/nbi/service/sse_telemetry/database/models/Subscription.py
@@ -0,0 +1,42 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sqlalchemy
+from sqlalchemy import Column, Integer, String, JSON, Boolean
+from sqlalchemy.dialects.postgresql import UUID
+
+
+# from sqlalchemy.orm import declarative_base
+
+from nbi.service.database.base import _Base
+
+# _Base = declarative_base()
+
+
+class SSESubscriptionModel(_Base):
+    __tablename__ = 'sse_subscription'
+
+    uuid = Column(String, primary_key=True)
+    identifier = Column(String, nullable=False, unique=False)
+    uri = Column(String, nullable=False, unique=False)
+    xpath = Column(String, nullable=False, unique=False)
+    status = Column(Boolean, default=False)
+    main_subscription = Column(Boolean, default=False)
+    main_subscription_id = Column(String, nullable=True)
+    # Free-form metadata; nullable JSON column matching SSESubsciprionDict['details'].
+    details = Column(JSON, nullable=True)
+
+
+# def rebuild_database(db_engine: sqlalchemy.engine.Engine, drop_if_exists: bool = False):
+#     if drop_if_exists:
+#         _Base.metadata.drop_all(db_engine)
+#     _Base.metadata.create_all(db_engine)
diff --git a/src/nbi/service/sse_telemetry/database/models/__init__.py b/src/nbi/service/sse_telemetry/database/models/__init__.py
new file mode 100644
index 000000000..7363515f0
--- /dev/null
+++ b/src/nbi/service/sse_telemetry/database/models/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/nbi/service/sse_telemetry/database_tmp.py b/src/nbi/service/sse_telemetry/database_tmp.py new file mode 100644 index 000000000..ba6f7d8f6 --- /dev/null +++ b/src/nbi/service/sse_telemetry/database_tmp.py @@ -0,0 +1,2 @@ + +SERVICE_ID = 'simap1' diff --git a/src/nbi/service/sse_telemetry/delete_subscription.py b/src/nbi/service/sse_telemetry/delete_subscription.py new file mode 100644 index 000000000..29d8c205e --- /dev/null +++ b/src/nbi/service/sse_telemetry/delete_subscription.py @@ -0,0 +1,114 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Optional +from flask import jsonify, request +from flask_restful import Resource +from werkzeug.exceptions import NotFound, InternalServerError, UnsupportedMediaType +from common.proto.monitoring_pb2 import ( + SSEMonitoringSubscriptionConfig, + SSEMonitoringSubscriptionResponse, +) +from device.client.DeviceClient import DeviceClient +from context.client.ContextClient import ContextClient +from nbi.service._tools.Authentication import HTTP_AUTH +from nbi.service.database.Engine import Engine +from nbi.service.sse_telemetry.database.Subscription import ( + get_main_subscription, + get_sub_subscription, + delete_subscription, +) +from nbi.service.sse_telemetry.topology import ( + Controllers, + UnsubscribedNotificationsSchema, + get_controller_name, +) + +from .database_tmp import SERVICE_ID + + +LOGGER = logging.getLogger(__name__) + + +class DeleteSubscription(Resource): + # @HTTP_AUTH.login_required + def post(self): + global SERVICE_ID + + db = Engine.get_engine() + if db is None: + LOGGER.error('Database engine is not initialized') + raise InternalServerError('Database engine is not initialized') + if not request.is_json: + LOGGER.error('JSON payload is required') + raise UnsupportedMediaType('JSON payload is required') + request_data: Optional[UnsubscribedNotificationsSchema] = request.json + if request_data is None: + LOGGER.error('JSON payload is required') + raise UnsupportedMediaType('JSON payload is required') + main_subscription_id = request_data['delete-subscription']['identifier'] + LOGGER.debug( + 'Received delete subscription request for ID: {:s}'.format(main_subscription_id) + ) + + # Get the main subscription + main_subscription = get_main_subscription(db, main_subscription_id) + if main_subscription is None: + LOGGER.error('Subscription not found: {:s}'.format(main_subscription_id)) + raise NotFound('Subscription not found') + + # Get all sub-subscriptions associated with this main subscription + 
sub_subscriptions = get_sub_subscription(db, main_subscription_id) + + device_client = DeviceClient() + context_client = ContextClient() + + # Unsubscribe from each sub-subscription + for sub_sub in sub_subscriptions: + # Create unsubscribe request + device_controller = get_controller_name(sub_sub['xpath'], SERVICE_ID, context_client) + if device_controller == Controllers.CONTROLLERLESS: + LOGGER.warning( + 'Controllerless device detected, skipping subscription for: {:s}'.format( + sub_sub['xpath'] + ) + ) + continue + unsub_req = SSEMonitoringSubscriptionConfig() + unsub_req.device_id.device_uuid.uuid = device_controller.value + unsub_req.config_type = SSEMonitoringSubscriptionConfig.Unsubscribe + unsub_req.uri = sub_sub['xpath'] + unsub_req.identifier = sub_sub['identifier'] + + # Send unsubscribe request to device + device_client.SSETelemetrySubscribe(unsub_req) + + delete_subscription(db, sub_sub['identifier'], False) + + LOGGER.info('Unsubscribed from {:s} successfully'.format(sub_sub.get('uri', ''))) + + # Delete the main subscription from database + delete_subscription(db, main_subscription_id, True) + + LOGGER.info('Successfully deleted main subscription: {:s}'.format(main_subscription_id)) + + if SERVICE_ID == 'simap1': + SERVICE_ID = 'simap2' + elif SERVICE_ID == 'simap2': + SERVICE_ID = 'simap1' + else: + LOGGER.warning('Unknown service ID, not switching: {:s}'.format(SERVICE_ID)) + + return jsonify({}) diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py new file mode 100644 index 000000000..eb60f30c1 --- /dev/null +++ b/src/nbi/service/sse_telemetry/topology.py @@ -0,0 +1,187 @@ +import json +import logging +import os +from enum import Enum +from string import octdigits +from typing_extensions import List, TypedDict, Optional +import re + +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from common.proto.context_pb2 import Device, DeviceId, Empty +from common.tools.object_factory.Device import json_device_id +from common.DeviceTypes import DeviceTypeEnum + +Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) + +Input = TypedDict( + 'Input', + { + 'datastore': str, + 'ietf-yang-push:datastore-xpath-filter': str, + 'ietf-yang-push:periodic': Periodic, + }, +) + +SubscribedNotificationsSchema = TypedDict( + 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} +) + +UnsubInput = TypedDict('UnsubInput', {'identifier': str}) + +UnsubscribedNotificationsSchema = TypedDict( + 'UnsubscribedNotificationsSchema', {'delete-subscription': UnsubInput} +) + + +class Controllers(str, Enum): + AGG_NET_CONTROLLER = 'agg-net-controller' + IP_TRANSPORT_CONTROLLER = 'ip-transport-controller' + NCE_CONTROLLER = 'nce-controller' + CONTROLLERLESS = 'controllerless' + + +match_network = re.compile(r'\/network=([^\/]*)') + +te_link = re.compile(r'\/ietf-network-topology:link=([\d.]+)-') + +phy_network = re.compile(r'providerId-\d+-clientId-\d+-topologyId-\d+') + +LOGGER = logging.getLogger(__name__) + +dir_path = os.path.dirname(__file__) + +with open(os.path.join(dir_path, 'Full-Te-Topology-simap1.json'), 'r') as f: + NETWORK_DATA_SIMAP1 = json.load(f) + +with open(os.path.join(dir_path, 'Full-Te-Topology-simap2.json'), 'r') as f: + NETWORK_DATA_SIMAP2 = json.load(f) + + +def get_network_data(service_id: str) -> dict: + if service_id == 'simap1': + return NETWORK_DATA_SIMAP1 + elif service_id == 'simap2': + return NETWORK_DATA_SIMAP2 + else: + raise 
ValueError(f'Unsupported service_id: {service_id}. Expected "simap1" or "simap2".') + + +def decompose_subscription( + s: SubscribedNotificationsSchema, service_id: str +) -> List[SubscribedNotificationsSchema]: + """ + Decomposes a subscription into its components by finding supporting links + in the network hierarchy. + """ + input_data = s['ietf-subscribed-notifications:input'] + xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] + + # Parse the XPath to extract network and link information + # Format: /ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry + parts = xpath_filter.split('/') + network_part = None + link_part = None + + for part in parts: + if part.startswith('network='): + network_part = part[8:] # Remove 'network=' prefix + elif part.startswith('ietf-network-topology:link='): + link_part = part[27:] # Remove 'ietf-network-topology:link=' prefix + + if not network_part or not link_part: + raise ValueError('Invalid XPath filter format') + + # Find the network in the topology data + networks = get_network_data(service_id)['ietf-network:networks']['network'] + target_network = None + + for network in networks: + if network['network-id'] == network_part: + target_network = network + break + + if not target_network: + raise ValueError(f'Network {network_part} not found in topology data') + + # Find the link in the network + links = target_network.get('ietf-network-topology:link', []) + target_link = None + + for link in links: + if link['link-id'] == link_part: + target_link = link + break + + if not target_link: + raise ValueError(f'Link {link_part} not found in network {network_part}') + + # Get supporting links + supporting_links = target_link.get('ietf-network-topology:supporting-link', []) + + if not supporting_links: + raise ValueError( + f'No supporting links found for link {link_part} in network {network_part}' + ) + + # Create decomposed subscriptions + decomposed = [] + + for supporting_link in supporting_links: + network_ref = supporting_link['network-ref'] + link_ref = supporting_link['link-ref'] + + # Create new XPath filter for the supporting link + new_xpath = f'/ietf-network:networks/network={network_ref}/ietf-network-topology:link={link_ref}/simap-telemetry' + + # Create new subscription + new_subscription = { + 'ietf-subscribed-notifications:input': { + 'datastore': input_data['datastore'], + 'ietf-yang-push:datastore-xpath-filter': new_xpath, + 'ietf-yang-push:periodic': input_data['ietf-yang-push:periodic'], + } + } + + decomposed.append(new_subscription) + + return decomposed + + +def get_controller_name(xpath: str, service_id: str, context_client: ContextClient) -> Controllers: + m = match_network.search(xpath) + network = m.groups()[0] + if 'simap' in network: + if 'aggnet' in network: + return Controllers.AGG_NET_CONTROLLER + elif 'trans' in network: + nodes = [] + for n in get_network_data(service_id)['ietf-network:networks']['network']: + nodes.extend( + [nn['node-ref'] for node in n['node'] for nn in node['supporting-node']] + ) + devices = context_client.ListDevices(Empty()) + transport_nodes = [ + d.name + for d in devices + if d.controller_id.device_uuid.uuid == 'ip-transport-controller' + ] + if all(node in transport_nodes for node in nodes): + return Controllers.IP_TRANSPORT_CONTROLLER + else: + raise ValueError(f'Unsupported transport network in XPath: {xpath}') + else: + raise ValueError(f'Unsupported network type in XPath: {xpath}') + elif phy_network.search(network): + LOGGER.info(f'Phy network detected 
in XPath: {xpath}') + m = te_link.search(xpath) + node = m.groups()[0] + device = context_client.GetDevice(DeviceId(**json_device_id(node))) + controller_uuid = device.controller_id.device_uuid.uuid + ctrl = context_client.GetDevice(DeviceId(**json_device_id(controller_uuid))) + if ctrl.name == 'nce-controller': + return Controllers.NCE_CONTROLLER + else: + return Controllers.CONTROLLERLESS + else: + raise ValueError(f'Unsupported XPath: {xpath}') diff --git a/src/tests/ecoc25-camara-e2e-telemetry/Dockerfile b/src/tests/ecoc25-camara-e2e-telemetry/Dockerfile new file mode 100644 index 000000000..cdd1b16d1 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/Dockerfile @@ -0,0 +1,86 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/tests/ofc25-camara-e2e-controller +WORKDIR /var/teraflow/tests/ofc25-camara-e2e-controller +COPY src/tests/ofc25-camara-e2e-controller/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/device/__init__.py device/__init__.py +COPY src/device/client/. device/client/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. 
monitoring/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/slice/__init__.py slice/__init__.py +COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ +COPY src/tests/*.py ./tests/ +COPY src/tests/ofc25-camara-e2e-controller/__init__.py ./tests/ofc25-camara-e2e-controller/__init__.py +COPY src/tests/ofc25-camara-e2e-controller/data/. ./tests/ofc25-camara-e2e-controller/data/ +COPY src/tests/ofc25-camara-e2e-controller/tests/. ./tests/ofc25-camara-e2e-controller/tests/ +COPY src/tests/ofc25-camara-e2e-controller/scripts/. ./ + +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install tree && \ + rm -rf /var/lib/apt/lists/* + +RUN tree -la /var/teraflow diff --git a/src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json b/src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json new file mode 100644 index 000000000..b2a8617e2 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json @@ -0,0 +1,1725 @@ +{ + "contexts": [ + { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + } + } + ], + "topologies": [ + { + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "admin" + } + } + } + ], + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "name": "ip-transport-controller", + "device_type": "ietf-slice", + "device_operational_status": 1, + "device_drivers": [ + 14 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "AGG_NET_IP" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "AGG_NET_PORT" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + } + ], + "scheme": "http", + "username": "admin", + "password": "admin", + "base_url": "/restconf/v2/data", + "timeout": 120, + "verify": false + } + } + } + ] + }, + "device_endpoints": [] + }, + { + "device_id": { + "device_uuid": { + "uuid": "agg-net-controller" + } + }, + "name": "agg-net-controller", + "device_type": "ietf-slice", + "device_operational_status": 1, + "device_drivers": [ + 14 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "AGG_NET_IP" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "AGG_NET_PORT" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + } + ], + "scheme": "http", + "username": "admin", + "password": "admin", + "base_url": "/restconf/v2/data", + "timeout": 120, + "verify": false + } + } + } + ] + }, + "device_endpoints": [] + }, + { + "device_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "name": "nce-controller", + "device_type": "nce", + "device_operational_status": 1, + "device_drivers": [ + 15 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "NCE_IP" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": 
"NCE_PORT" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + } + ], + "scheme": "http", + "username": "admin", + "password": "admin", + "base_url": "/restconf/v2/data", + "timeout": 120, + "verify": false + } + } + } + ] + }, + "device_endpoints": [] + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "name": "172.16.182.25", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 0, + 14 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "200", + "name": "200", + "type": "optical", + "address_ip": "128.32.33.254", + "address_prefix": "24", + "site_location": "access", + "mtu": "1500" + }, + { + "uuid": "500", + "name": "500", + "type": "optical" + }, + { + "uuid": "501", + "name": "501", + "type": "optical" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.31" + } + }, + "name": "172.16.185.31", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 0, + 14 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "500", + "name": "500", + "type": "optical" + }, + { + "uuid": "501", + "name": "501", + "type": "optical" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.33" + } + }, + "name": "172.16.185.33", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 0, + 14 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "500", + "name": "500", + "type": "optical" + }, + { + "uuid": "501", + "name": "501", + "type": "optical" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "name": "172.16.185.32", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 0, + 14 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { 
+ "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "200", + "name": "200", + "type": "optical", + "ce-ip": "172.10.33.2", + "address_ip": "172.10.33.254", + "address_prefix": "24", + "site_location": "cloud", + "mtu": "1500" + }, + { + "uuid": "500", + "name": "500", + "type": "optical" + }, + { + "uuid": "501", + "name": "501", + "type": "optical" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "name": "172.16.58.10", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 15 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "200", + "name": "200", + "type": "optical", + "address_ip": "0.0.0.0", + "address_prefix": "24" + }, + { + "uuid": "201", + "name": "201", + "type": "optical", + "address_ip": "0.0.0.0", + "address_prefix": "24" + }, + { + "uuid": "500", + "name": "500", + "type": "optical", + "address_ip": "128.32.33.2", + "address_prefix": "24" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.10" + } + }, + "name": "172.16.61.10", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 15 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "200", + "name": "200", + "type": "optical", + "address_ip": "0.0.0.0", + "address_prefix": "24" + }, + { + "uuid": "500", + "name": "500", + "type": "optical", + "address_ip": "128.32.33.2", + "address_prefix": "24" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.11" + } + }, + "name": "172.16.61.11", + "device_type": "emu-packet-router", + "controller_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "device_operational_status": 1, + "device_drivers": [ + 15 + ], + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "uuid": "mgmt", + "name": "mgmt", + "type": "mgmt" + }, + { + "uuid": "200", + "name": "200", + "type": "optical", + "address_ip": "0.0.0.0", + 
"address_prefix": "24" + }, + { + "uuid": "500", + "name": "500", + "type": "optical", + "address_ip": "128.32.33.2", + "address_prefix": "24" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.104.221" + } + }, + "device_type": "emu-datacenter", + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "sample_types": [], + "type": "copper", + "uuid": "eth0" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.104.222" + } + }, + "device_type": "emu-datacenter", + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "sample_types": [], + "type": "copper", + "uuid": "eth0" + } + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.204.220" + } + }, + "device_type": "emu-datacenter", + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "endpoints": [ + { + "sample_types": [], + "type": "optical", + "uuid": "500" + }, + { + "sample_types": [], + "type": "optical", + "uuid": "200" + }, + { + "sample_types": [], + "type": "optical", + "uuid": "201" + } + ] + } + } + } + ] + } + } + ], + "links": [ + { + "link_id": { + "link_uuid": { + "uuid": "agg-net-controller/mgmt==ip-transport-controller/mgmt" + } + }, + "name": "agg-net-controller/mgmt==ip-transport-controller/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "agg-net-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "nce-controller/mgmt==172.16.61.11/mgmt" + } + }, + "name": "nce-controller/mgmt==172.16.61.11/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.11" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "nce-controller/mgmt==172.16.61.10/mgmt" + } + }, + "name": "nce-controller/mgmt==172.16.61.10/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + 
"uuid": "172.16.61.10" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "nce-controller/mgmt==172.16.58.10/mgmt" + } + }, + "name": "nce-controller/mgmt==172.16.58.10/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "nce-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "ip-transport-controller/mgmt==172.16.185.33/mgmt" + } + }, + "name": "ip-transport-controller/mgmt==172.16.185.33/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.33" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "ip-transport-controller/mgmt==172.16.185.31/mgmt" + } + }, + "name": "ip-transport-controller/mgmt==172.16.185.31/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.31" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "ip-transport-controller/mgmt==172.16.185.32/mgmt" + } + }, + "name": "ip-transport-controller/mgmt==172.16.185.32/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "ip-transport-controller/mgmt==172.16.182.25/mgmt" + } + }, + "name": "ip-transport-controller/mgmt==172.16.182.25/mgmt", + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "ip-transport-controller" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { + "uuid": "mgmt" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.182.25-500" + } + }, + "name": "172.16.182.25-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.33" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.33-500" + } + }, + "name": "172.16.185.33-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.33" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.182.25-501" + } + }, + "name": "172.16.182.25-501", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { 
+ "uuid": "501" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.31" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.31-501" + } + }, + "name": "172.16.185.31-501", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.31" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.31-500" + } + }, + "name": "172.16.185.31-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.31" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.32-500" + } + }, + "name": "172.16.185.32-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.31" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.33-501" + } + }, + "name": "172.16.185.33-501", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.33" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.32-501" + } + }, + "name": "172.16.185.32-501", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.33" + } + }, + "endpoint_uuid": { + "uuid": "501" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.185.32-200" + } + }, + "name": "172.16.185.32-200", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.204.220" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.204.220-500" + } + }, + "name": "172.16.204.220-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.204.220" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.185.32" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.182.25-200" + } + }, + "name": "172.16.182.25-200", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, 
+ "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.58.10-500" + } + }, + "name": "172.16.58.10-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.182.25" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.58.10-200" + } + }, + "name": "172.16.58.10-200", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.10" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.61.10-500" + } + }, + "name": "172.16.61.10-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.10" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.58.10-201" + } + }, + "name": "172.16.58.10-201", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "201" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.11" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.61.11-500" + } + }, + "name": "172.16.61.11-500", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.11" + } + }, + "endpoint_uuid": { + "uuid": "500" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.58.10" + } + }, + "endpoint_uuid": { + "uuid": "201" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.61.10-200" + } + }, + "name": "172.16.61.10-200", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.10" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.104.221" + } + }, + "endpoint_uuid": { + "uuid": "eth0" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.104.221-eth0" + } + }, + "name": "172.16.104.221-eth0", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.104.221" + } + }, + "endpoint_uuid": { + "uuid": "eth0" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.10" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.61.11-200" + } + }, + 
"name": "172.16.61.11-200", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.11" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.104.222" + } + }, + "endpoint_uuid": { + "uuid": "eth0" + } + } + ] + }, + { + "link_id": { + "link_uuid": { + "uuid": "172.16.104.222-eth0" + } + }, + "name": "172.16.104.222-eth0", + "attributes": { + "total_capacity_gbps": 10, + "used_capacity_gbps": 0 + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "172.16.104.222" + } + }, + "endpoint_uuid": { + "uuid": "eth0" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "172.16.61.11" + } + }, + "endpoint_uuid": { + "uuid": "200" + } + } + ] + } + ] +} diff --git a/src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh b/src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh new file mode 100755 index 000000000..fc61779a3 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh @@ -0,0 +1,208 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator" +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. 
+export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="YES" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT="9092" + +# Set the flag to YES for redeploying of Apache Kafka +export KFK_REDEPLOY="" diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile b/src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile new file mode 100644 index 000000000..cf4797c4a --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile @@ -0,0 +1,30 @@ +FROM python:3.11-slim + +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements first for better caching +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY app/ . 
+ +# Create non-root user +RUN adduser --disabled-password --gecos '' appuser +RUN chown -R appuser:appuser /app +USER appuser + +# Expose port +EXPOSE 8000 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8000/health || exit 1 + +# Run the application +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py b/src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py new file mode 100644 index 000000000..c0ca9bb3c --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py @@ -0,0 +1,299 @@ +import asyncio +import json +import logging +import os +import random +import re +from datetime import datetime +from typing import Dict + +from fastapi import FastAPI, HTTPException +from fastapi.responses import StreamingResponse +from pydantic import BaseModel, Field + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Environment variables +SERVICE_NAME = os.getenv('SERVICE_NAME', 'nce') +BASE_BANDWIDTH = float(os.getenv('BASE_BANDWIDTH', '75.0')) +BASE_DELAY = float(os.getenv('BASE_DELAY', '2.0')) +PORT = int(os.getenv('PORT', '8000')) + + +match_network = re.compile(r'\/network=([^\/]*)') + +# Global state +subscriptions: Dict[str, Dict] = {} +active_streams: Dict[str, bool] = {} +metrics_multipliers = {'bandwidth': 1.0, 'delay': 1.0} + +app = FastAPI( + title='F5G Telemetry API with SSE + YANG Push', + version='0.0.1', + description=f'Mock telemetry server for {SERVICE_NAME}', +) + + +# Pydantic models +class EstablishSubscriptionInput(BaseModel): + datastore: str + ietf_yang_push_datastore_xpath_filter: str = Field( + alias='ietf-yang-push:datastore-xpath-filter' + ) + ietf_yang_push_periodic: Dict[str, str] = Field(alias='ietf-yang-push:periodic') + + +class EstablishSubscriptionRequest(BaseModel): + ietf_subscribed_notifications_input: EstablishSubscriptionInput = Field( + alias='ietf-subscribed-notifications:input' + ) + + +class EstablishSubscriptionResponse(BaseModel): + identifier: str + uri: str + + +class DeleteSubscriptionInner(BaseModel): + identifier: str + + +class DeleteSubscriptionRequest(BaseModel): + delete_subscription: DeleteSubscriptionInner = Field(alias='delete-subscription') + + +class MetricRequest(BaseModel): + factor: float = Field(default=0.2, description='Factor to degrade/enhance metrics by') + + +def generate_telemetry_data(update_id: str, service_name: str) -> Dict: + """Generate realistic telemetry data with noise""" + base_bandwidth = BASE_BANDWIDTH * metrics_multipliers['bandwidth'] + base_delay = BASE_DELAY * metrics_multipliers['delay'] + + # Add realistic noise + bandwidth_noise = random.uniform(-5.0, 5.0) + delay_noise = random.uniform(-0.5, 0.5) + + bandwidth = max(0.0, base_bandwidth + bandwidth_noise) + delay = max(0.0, base_delay + delay_noise) + + return { + 'notification': { + 'eventTime': datetime.utcnow().isoformat() + 'Z', + 'push-update': { + 'id': update_id, + 'datastore-contents': { + 'ietf-network-topology:simap-telemetry': { + 'bandwidth-utilization': str(round(bandwidth, 2)), + 'latency': str(round(delay, 2)), + 'related-service-ids': [service_name], + } + }, + }, + } + } + + +@app.get('/health') +async def health_check(): + """Health check endpoint""" + return {'status': 'healthy', 'service': SERVICE_NAME} + + +@app.post('/restconf/operations/subscriptions:establish-subscription') +async def establish_subscription( + 
request: EstablishSubscriptionRequest, +) -> EstablishSubscriptionResponse: + """Establish a telemetry subscription""" + subscription_id = str(random.randint(10, 99)) + + m = match_network.search( + request.ietf_subscribed_notifications_input.ietf_yang_push_datastore_xpath_filter + ) + service_name = m.groups()[0] + # Store subscription details + subscriptions[subscription_id] = { + 'id': subscription_id, + 'datastore': request.ietf_subscribed_notifications_input.datastore, + 'xpath_filter': request.ietf_subscribed_notifications_input.ietf_yang_push_datastore_xpath_filter, + 'service': service_name, + 'period': request.ietf_subscribed_notifications_input.ietf_yang_push_periodic.get( + 'ietf-yang-push:period', '3' + ), + 'created_at': datetime.utcnow().isoformat(), + 'active': True, + } + + logger.info(f'Created subscription {subscription_id} for {SERVICE_NAME}') + + return EstablishSubscriptionResponse( + identifier=subscription_id, uri=f'/restconf/data/subscriptions/{subscription_id}' + ) + + +@app.get('/restconf/data/subscriptions/{subscription_id}') +async def start_telemetry_stream(subscription_id: str): + """Start SSE telemetry stream for a subscription""" + if subscription_id not in subscriptions: + raise HTTPException(status_code=404, detail='Subscription not found') + + subscription = subscriptions[subscription_id] + if not subscription['active']: + raise HTTPException(status_code=400, detail='Subscription is not active') + + active_streams[subscription_id] = True + + async def event_generator(): + """Generate SSE events""" + update_counter = 1 + period = float(subscription['period']) + + try: + while active_streams.get(subscription_id, False): + # Generate telemetry data + telemetry_data = generate_telemetry_data( + str(update_counter), subscription['service'] + ) + + # Format as SSE + sse_data = f'event: push-update\nid: {update_counter}\ndata: {json.dumps(telemetry_data)}\n\n' + + yield sse_data + + update_counter += 1 + await asyncio.sleep(period) + + # Send termination event + termination_event = ( + f'event: subscription-terminated\nid: {update_counter}\ndata: {{}}\n\n' + ) + yield termination_event + + except Exception as e: + logger.error(f'Error in event generator: {e}') + finally: + # Clean up + active_streams[subscription_id] = False + if subscription_id in subscriptions: + subscriptions[subscription_id]['active'] = False + + return StreamingResponse( + event_generator(), + media_type='text/event-stream', + headers={ + 'Cache-Control': 'no-cache', + 'Connection': 'keep-alive', + 'Access-Control-Allow-Origin': '*', + 'Access-Control-Allow-Headers': 'Cache-Control', + }, + ) + + +@app.post('/restconf/operations/subscriptions:delete-subscription') +async def delete_subscription(request: DeleteSubscriptionRequest): + """Delete a subscription""" + subscription_id = request.delete_subscription.identifier + + if subscription_id not in subscriptions: + raise HTTPException(status_code=404, detail='Subscription not found') + + # Stop active stream + active_streams[subscription_id] = False + + # Remove subscription + del subscriptions[subscription_id] + + logger.info(f'Deleted subscription {subscription_id} for {SERVICE_NAME}') + + return {'status': 'deleted'} + + +@app.post('/degrade/delay') +async def degrade_delay(request: MetricRequest): + """Degrade delay metrics""" + metrics_multipliers['delay'] *= 1.0 + request.factor + logger.info(f'Degraded delay by factor {request.factor} for {SERVICE_NAME}') + return {'status': 'degraded', 'new_multiplier': metrics_multipliers['delay']} 
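+
+# Illustrative calls against this mock (the `nce` instance is published on
+# localhost:8001 in mocks/docker-compose.yml); the JSON body follows MetricRequest:
+#
+#   curl -X POST http://localhost:8001/degrade/delay -H "Content-Type: application/json" -d '{"factor": 0.2}'
+#   curl -X POST http://localhost:8001/enhance/bandwidth -H "Content-Type: application/json" -d '{"factor": 0.2}'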
+ + +@app.post('/enhance/delay') +async def enhance_delay(request: MetricRequest): + """Enhance delay metrics""" + metrics_multipliers['delay'] *= 1.0 - request.factor + metrics_multipliers['delay'] = max(0.1, metrics_multipliers['delay']) # Prevent negative values + logger.info(f'Enhanced delay by factor {request.factor} for {SERVICE_NAME}') + return {'status': 'enhanced', 'new_multiplier': metrics_multipliers['delay']} + + +@app.post('/degrade/bandwidth') +async def degrade_bandwidth(request: MetricRequest): + """Degrade bandwidth metrics""" + metrics_multipliers['bandwidth'] *= 1.0 - request.factor + metrics_multipliers['bandwidth'] = max( + 0.1, metrics_multipliers['bandwidth'] + ) # Prevent negative values + logger.info(f'Degraded bandwidth by factor {request.factor} for {SERVICE_NAME}') + return {'status': 'degraded', 'new_multiplier': metrics_multipliers['bandwidth']} + + +@app.post('/enhance/bandwidth') +async def enhance_bandwidth(request: MetricRequest): + """Enhance bandwidth metrics""" + metrics_multipliers['bandwidth'] *= 1.0 + request.factor + logger.info(f'Enhanced bandwidth by factor {request.factor} for {SERVICE_NAME}') + return {'status': 'enhanced', 'new_multiplier': metrics_multipliers['bandwidth']} + + +@app.get('/subscriptions') +async def list_subscriptions(): + """List all active subscriptions""" + return {'subscriptions': subscriptions} + + +@app.get('/metrics/status') +async def get_metrics_status(): + """Get current metrics multipliers""" + return { + 'service': SERVICE_NAME, + 'base_bandwidth': BASE_BANDWIDTH, + 'base_delay': BASE_DELAY, + 'current_multipliers': metrics_multipliers, + 'effective_bandwidth': BASE_BANDWIDTH * metrics_multipliers['bandwidth'], + 'effective_delay': BASE_DELAY * metrics_multipliers['delay'], + } + + +if __name__ == '__main__': + import uvicorn + + uvicorn.run(app, host='0.0.0.0', port=PORT) + + + +''' +establish subscription: +curl -X POST http://localhost:8001/restconf/operations/subscriptions:establish-subscription -H "Content-Type: application/json" -d '{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=trans-simap-1/ietf-network-topology:link=link-1/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": "3" + } + } + }' + + +start telemetry: +curl -X GET http://localhost:8001/restconf/data/subscriptions/34 + + +stop telemetry: +curl -X POST http://localhost:8001/restconf/operations/subscriptions:delete-subscription -H "Content-Type: application/json" -d '{ + "delete-subscription": { + "identifier": "34" + } + }' +''' diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml b/src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml new file mode 100644 index 000000000..c5cd0b8b2 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml @@ -0,0 +1,58 @@ +version: '3.8' + +services: + nce: + build: . + ports: + - "8001:8000" + environment: + - SERVICE_NAME=nce + - BASE_BANDWIDTH=75.0 + - BASE_DELAY=2.0 + - PORT=8000 + volumes: + - ./app:/app + restart: unless-stopped + + aggnet-controller: + build: . + ports: + - "8002:8000" + environment: + - SERVICE_NAME=aggnet-controller + - BASE_BANDWIDTH=80.0 + - BASE_DELAY=1.5 + - PORT=8000 + volumes: + - ./app:/app + restart: unless-stopped + + ip-controller: + build: .
+ ports: + - "8003:8000" + environment: + - SERVICE_NAME=ip-controller + - BASE_BANDWIDTH=80.0 + - BASE_DELAY=1.5 + - PORT=8000 + volumes: + - ./app:/app + restart: unless-stopped + + optical-controller: + build: . + ports: + - "8004:8000" + environment: + - SERVICE_NAME=optical-controller + - BASE_BANDWIDTH=90.0 + - BASE_DELAY=1.0 + - PORT=8000 + volumes: + - ./app:/app + restart: unless-stopped + +networks: + default: + driver: bridge diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt b/src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt new file mode 100644 index 000000000..1cc77241c --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt @@ -0,0 +1,5 @@ +fastapi==0.104.1 +uvicorn[standard]==0.24.0 +pydantic==2.5.0 +python-multipart==0.0.6 +aiofiles==23.2.1 diff --git a/src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml b/src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml new file mode 100644 index 000000000..47645515b --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/src/tests/ecoc25-camara-e2e-telemetry/requirements.in b/src/tests/ecoc25-camara-e2e-telemetry/requirements.in new file mode 100644 index 000000000..1bdaec999 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/requirements.in @@ -0,0 +1,30 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +deepdiff==6.7.* +requests==2.27.* + +coverage==6.3 +grpcio==1.47.* +grpcio-health-checking==1.47.* +grpcio-reflection==1.47.* +grpcio-tools==1.47.* +grpclib==0.4.4 +prettytable==3.5.0 +prometheus-client==0.13.0 +protobuf==3.20.* +pytest==6.2.5 +pytest-benchmark==3.4.1 +python-dateutil==2.8.2 +pytest-depends==1.0.1 diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py new file mode 100644 index 000000000..5997e58c8 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py @@ -0,0 +1,43 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from monitoring.client.MonitoringClient import MonitoringClient +from service.client.ServiceClient import ServiceClient + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def monitoring_client(): + _client = MonitoringClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + _client.close() diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py new file mode 100644 index 000000000..9ca1d7d21 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py @@ -0,0 +1,109 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, logging, requests +from typing import Any, Dict, List, Optional, Set, Union +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_http + +NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) +NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) +NBI_USERNAME = 'admin' +NBI_PASSWORD = 'admin' +NBI_BASE_URL = '' + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], + requests.codes['CREATED' ], + requests.codes['ACCEPTED' ], + requests.codes['NO_CONTENT'], +} + +def do_rest_request( + method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( + NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url + ) + + if logger is not None: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + logger.warning(msg) + reply = requests.request(method.value, request_url, headers={'Content-Type': 'application/json'}, timeout=timeout, json=body, allow_redirects=allow_redirects) + if logger is not None: + logger.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) + + if reply.content and len(reply.content) > 0: return reply.json() + return None + +def do_rest_get_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : 
Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_post_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_put_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_patch_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_delete_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py new file mode 100644 index 000000000..cb991edbf --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py @@ -0,0 +1,478 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, os +import requests +from deepdiff import DeepDiff + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +HEADERS = {"Content-Type": "application/json"} + +POST_NETWORK_SLICE1 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_network_slice1.json", +) +POST_NETWORK_SLICE2 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_network_slice2.json", +) +POST_CONNECTION_GROUP_TO_NETWORK_SLICE1 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_connection_group_to_network_slice1.json", +) +POST_CONNECTION_GROUP_TO_NETWORK_SLICE2 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_connection_group_to_network_slice2.json", +) +POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE1 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_match_criteria_to_sdp1_in_slice1.json", +) +POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE2 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_match_criteria_to_sdp1_in_slice2.json", +) +POST_SDP_TO_NETWORK_SLICE1 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_sdp_to_network_slice1.json", +) +POST_SDP_TO_NETWORK_SLICE2 = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "slice", + "post_sdp_to_network_slice2.json", +) +TARGET_NCE_APP_FLOWS = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "target-nce-app-flows.json", +) +TARGET_NCE_APPS = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "target-nce-apps.json", +) +TARGET_FULL_IETF_SLICE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "target-full-ietf-slice.json", +) +TARGET_IETF_SLICE_POSTED_SLICES = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "target-ietf-slice-posted-slices.json", +) +TARGET_IETF_SLICE_PUT_CONNECTION_GROUPS = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "data", + "target-ietf-slice-put-connection-groups.json", +) + +NBI_ADDRESS = "localhost" +NBI_PORT = "80" +NBI_USERNAME = "admin" +NBI_PASSWORD = "admin" + +NCE_ADDRESS = "localhost" +NCE_PORT = 9090 + +AGG_TFS_ADDRESS = "localhost" +AGG_TFS_PORT = 9091 + +BASE_IETF_SLICE_URL = f"http://{NBI_ADDRESS}:{NBI_PORT}/restconf/data/ietf-network-slice-service:network-slice-services" +NCE_APP_DATA_URL = f"http://{NCE_ADDRESS}:{NCE_PORT}/restconf/v1/data/app-flows/apps" +NCE_APP_FLOW_DATA_URL = f"http://{NCE_ADDRESS}:{NCE_PORT}/restconf/v1/data/app-flows" +AGG_TFS_IETF_SLICE_URL = f"http://{AGG_TFS_ADDRESS}:{AGG_TFS_PORT}/restconf/data/ietf-network-slice-service:network-slice-services" + + +# pylint: disable=redefined-outer-name, unused-argument +def
test_ietf_slice_creation_removal(): + # Issue service creation request + with open(POST_NETWORK_SLICE1, "r", encoding="UTF-8") as f: + post_network_slice1 = json.load(f) + with open(POST_NETWORK_SLICE2, "r", encoding="UTF-8") as f: + post_network_slice2 = json.load(f) + with open(POST_CONNECTION_GROUP_TO_NETWORK_SLICE1, "r", encoding="UTF-8") as f: + post_connection_group_to_network_slice1 = json.load(f) + with open(POST_CONNECTION_GROUP_TO_NETWORK_SLICE2, "r", encoding="UTF-8") as f: + post_connection_group_to_network_slice2 = json.load(f) + with open(POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE1, "r", encoding="UTF-8") as f: + post_match_criteria_to_sdp1_in_slice1 = json.load(f) + with open(POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE2, "r", encoding="UTF-8") as f: + post_match_criteria_to_sdp1_in_slice2 = json.load(f) + with open(POST_SDP_TO_NETWORK_SLICE1, "r", encoding="UTF-8") as f: + post_sdp_to_network_slice1 = json.load(f) + with open(POST_SDP_TO_NETWORK_SLICE2, "r", encoding="UTF-8") as f: + post_sdp_to_network_slice2 = json.load(f) + with open(TARGET_NCE_APPS, "r", encoding="UTF-8") as f: + target_nce_apps = json.load(f) + with open(TARGET_NCE_APP_FLOWS, "r", encoding="UTF-8") as f: + target_nce_app_flows = json.load(f) + with open(TARGET_FULL_IETF_SLICE, "r", encoding="UTF-8") as f: + target_full_ietf_slice = json.load(f) + with open(TARGET_IETF_SLICE_POSTED_SLICES, "r", encoding="UTF-8") as f: + target_ietf_slice_posted_slices = json.load(f) + with open(TARGET_IETF_SLICE_PUT_CONNECTION_GROUPS, "r", encoding="UTF-8") as f: + target_ietf_slice_put_connection_groups = json.load(f) + + # op 1 + URL = BASE_IETF_SLICE_URL + requests.post(URL, headers=HEADERS, json=post_network_slice1) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + app_name = "App_Flow_2_1_slice1" + apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) + app_flows_diff = DeepDiff( + app_flows_response[app_name], + target_nce_app_flows[app_name], + exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", + ) + assert not apps_diff + assert not app_flows_diff + assert len(apps_response) == 1 and len(app_flows_response) == 1 + + assert len(ietf_slice_connection_groups) == 0 + assert len(ietf_slice_services) == 1 + slice_diff = DeepDiff( + ietf_slice_services["slice1"], target_ietf_slice_posted_slices[0] + ) + assert not slice_diff + + # op 2 + URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps" + requests.post(URL, headers=HEADERS, json=post_sdp_to_network_slice1) + URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/connection-groups" + requests.post(URL, headers=HEADERS, json=post_connection_group_to_network_slice1) + URL = ( + BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=1/service-match-criteria" + ) + requests.post(URL, headers=HEADERS, json=post_match_criteria_to_sdp1_in_slice1) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = 
requests.get(URL).json() + + app_name = "App_Flow_3_1_slice1" + apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) + app_flows_diff = DeepDiff( + app_flows_response[app_name], + target_nce_app_flows[app_name], + exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", + ) + assert not apps_diff + assert not app_flows_diff + assert len(apps_response) == 2 and len(app_flows_response) == 2 + + assert len(ietf_slice_connection_groups) == 1 + assert len(ietf_slice_services) == 1 + connection_group_diff = DeepDiff( + ietf_slice_connection_groups[0], target_ietf_slice_put_connection_groups[0] + ) + assert not connection_group_diff + + # op 3 + URL = BASE_IETF_SLICE_URL + requests.post(URL, headers=HEADERS, json=post_network_slice2) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + app_name = "App_Flow_2_1_slice2" + apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) + app_flows_diff = DeepDiff( + app_flows_response[app_name], + target_nce_app_flows[app_name], + exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", + ) + assert not apps_diff + assert not app_flows_diff + assert len(apps_response) == 3 and len(app_flows_response) == 3 + + assert len(ietf_slice_connection_groups) == 1 + assert len(ietf_slice_services) == 2 + slice_diff = DeepDiff( + ietf_slice_services["slice2"], target_ietf_slice_posted_slices[1] + ) + assert not slice_diff + + # op 4 + URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps" + requests.post(URL, headers=HEADERS, json=post_sdp_to_network_slice2) + URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/connection-groups" + requests.post(URL, headers=HEADERS, json=post_connection_group_to_network_slice2) + URL = ( + BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=1/service-match-criteria" + ) + requests.post(URL, headers=HEADERS, json=post_match_criteria_to_sdp1_in_slice2) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + app_name = "App_Flow_3_1_slice2" + apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) + app_flows_diff = DeepDiff( + app_flows_response[app_name], + target_nce_app_flows[app_name], + exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", + ) + assert not apps_diff + assert not app_flows_diff + assert len(apps_response) == 4 and len(app_flows_response) == 4 + + assert len(ietf_slice_connection_groups) == 2 + assert len(ietf_slice_services) == 2 + connection_group_diff = DeepDiff( + ietf_slice_connection_groups[1], target_ietf_slice_put_connection_groups[1] + ) + assert not connection_group_diff + + # op 5 + ietf_slices_full_retrieved = requests.get(BASE_IETF_SLICE_URL).json() + ietf_slice_data = DeepDiff(ietf_slices_full_retrieved, target_full_ietf_slice) + assert not ietf_slice_data + + # op 6 + URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=2" + requests.delete(URL) 
+ URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice1/sdps/sdp=1/service-match-criteria/match-criterion=1" + ) + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice1/connection-groups/connection-group=line1" + ) + requests.delete(URL) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + app_name = "App_Flow_2_1_slice1" + assert app_name not in apps_response + assert app_name not in app_flows_response + assert len(apps_response) == 3 and len(app_flows_response) == 3 + + assert len(ietf_slice_connection_groups) == 3 + assert len(ietf_slice_services) == 2 + connection_group_diff = DeepDiff( + ietf_slice_connection_groups[2], target_ietf_slice_put_connection_groups[2] + ) + assert not connection_group_diff + + # op 7 + URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=3" + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice1/sdps/sdp=1/service-match-criteria/match-criterion=2" + ) + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice1/connection-groups/connection-group=line2" + ) + requests.delete(URL) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=1" + requests.delete(URL) + URL = BASE_IETF_SLICE_URL + "/slice-service=slice1" + requests.delete(URL) + + app_name = "App_Flow_3_1_slice1" + assert app_name not in apps_response + assert app_name not in app_flows_response + assert len(apps_response) == 2 and len(app_flows_response) == 2 + + assert len(ietf_slice_connection_groups) == 3 + assert len(ietf_slice_services) == 1 + assert "slice1" not in ietf_slice_services + + # op 8 + URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=2" + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice2/sdps/sdp=1/service-match-criteria/match-criterion=1" + ) + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice2/connection-groups/connection-group=line1" + ) + requests.delete(URL) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + app_name = "App_Flow_2_1_slice2" + assert app_name not in apps_response + assert app_name not in app_flows_response + assert len(apps_response) == 1 and len(app_flows_response) == 1 + + assert len(ietf_slice_connection_groups) == 4 + assert len(ietf_slice_services) == 1 + connection_group_diff = DeepDiff( + ietf_slice_connection_groups[3], target_ietf_slice_put_connection_groups[3] + ) + assert not connection_group_diff + + # op 9 + URL =
BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=3" + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice2/sdps/sdp=1/service-match-criteria/match-criterion=2" + ) + requests.delete(URL) + URL = ( + BASE_IETF_SLICE_URL + + "/slice-service=slice2/connection-groups/connection-group=line2" + ) + requests.delete(URL) + + URL = NCE_APP_DATA_URL + apps_response = requests.get(URL).json() + URL = NCE_APP_FLOW_DATA_URL + app_flows_response = requests.get(URL).json() + URL = AGG_TFS_IETF_SLICE_URL + ietf_slice_services = requests.get(URL).json() + URL = ( + AGG_TFS_IETF_SLICE_URL + + "/slice-service=dummy/connection-groups/connection-group=dummy" + ) + ietf_slice_connection_groups = requests.get(URL).json() + + URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=1" + requests.delete(URL) + URL = BASE_IETF_SLICE_URL + "/slice-service=slice2" + requests.delete(URL) + + app_name = "App_Flow_3_1_slice2" + assert app_name not in apps_response + assert app_name not in app_flows_response + assert len(apps_response) == 0 and len(app_flows_response) == 0 + + assert len(ietf_slice_connection_groups) == 4 + assert len(ietf_slice_services) == 0 + + # op 10 + ietf_slices_full_retrieved = requests.get(BASE_IETF_SLICE_URL).json() + empty_ietf_slices = {"network-slice-services": {"slice-service": []}} + ietf_slice_data = DeepDiff(ietf_slices_full_retrieved, empty_ietf_slices) + assert not ietf_slice_data diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py new file mode 100644 index 000000000..273d5d1f4 --- /dev/null +++ b/src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py @@ -0,0 +1,67 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'camara-e2e-topology-modified.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_onboarding( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + # descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 + assert num_devices_enabled == num_devices -- GitLab From 34af380fbe3e579dfb2b43108695e199f785a02d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 24 Jul 2025 16:34:35 +0000 Subject: [PATCH 002/367] ECOC F5GA Telemetry Demo: - Added IP controller topology - Added IP controller deploy scripts and specs --- .../data/topology-ip.json | 129 +++++++++++ .../ecoc25-f5ga-telemetry/deploy-specs-ip.sh | 217 ++++++++++++++++++ .../ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh | 17 ++ 3 files changed, 363 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json new file mode 100644 index 000000000..bf2507ce8 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json @@ -0,0 +1,129 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, 
"name": "172.16.125.25", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "ethernet"}, + {"uuid": "500", "name": "500", "type": "ethernet"}, + {"uuid": "501", "name": "501", "type": "ethernet"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "name": "172.16.125.31", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "500", "name": "500", "type": "ethernet"}, + {"uuid": "501", "name": "501", "type": "ethernet"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "name": "172.16.125.33", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "500", "name": "500", "type": "ethernet"}, + {"uuid": "501", "name": "501", "type": "ethernet"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "name": "172.16.125.32", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "ethernet"}, + {"uuid": "500", "name": "500", "type": "ethernet"}, + {"uuid": "501", "name": "501", "type": "ethernet"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "172.16.122.25-500"}}, "name": "172.16.122.25-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.122.31-500"}}, "name": "172.16.122.31-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "172.16.122.25-501"}}, 
"name": "172.16.122.25-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.122.33-500"}}, "name": "172.16.122.33-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "501"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "172.16.122.31-501"}}, "name": "172.16.122.31-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.122.32-500"}}, "name": "172.16.122.32-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "501"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "172.16.122.32-501"}}, "name": "172.16.122.32-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "501"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.122.33-501"}}, "name": "172.16.122.33-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "501"}} + ] + } + ] +} diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh new file mode 100644 index 000000000..0820e21b7 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh @@ -0,0 +1,217 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. 
+export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. 
+export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. 
+export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh new file mode 100755 index 000000000..32edf24d1 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh +./deploy/all.sh -- GitLab From 6b750f017d49a45f0fbd74252368a4fc0ed588d4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 24 Jul 2025 16:35:25 +0000 Subject: [PATCH 003/367] Disabled CI/CD tests --- .gitlab-ci.yml | 78 +++++++++++++++++++++++++------------------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ca970101f..9e0f7e485 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,42 +21,42 @@ stages: # include the individual .gitlab-ci.yml of each micro-service and tests include: - #- local: '/manifests/.gitlab-ci.yml' - - local: '/src/monitoring/.gitlab-ci.yml' - - local: '/src/nbi/.gitlab-ci.yml' - - local: '/src/context/.gitlab-ci.yml' - - local: '/src/device/.gitlab-ci.yml' - - local: '/src/service/.gitlab-ci.yml' - - local: '/src/qkd_app/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalattackdetector/.gitlab-ci.yml' - - local: '/src/opticalattackmanager/.gitlab-ci.yml' - - local: '/src/opticalcontroller/.gitlab-ci.yml' - - local: '/src/ztp/.gitlab-ci.yml' - - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/automation/.gitlab-ci.yml' - - local: '/src/forecaster/.gitlab-ci.yml' - #- local: '/src/webui/.gitlab-ci.yml' - #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' - - local: '/src/slice/.gitlab-ci.yml' - #- local: '/src/interdomain/.gitlab-ci.yml' - - local: '/src/pathcomp/.gitlab-ci.yml' - #- local: '/src/dlt/.gitlab-ci.yml' - - local: '/src/load_generator/.gitlab-ci.yml' - - local: '/src/bgpls_speaker/.gitlab-ci.yml' - - local: '/src/kpi_manager/.gitlab-ci.yml' - - local: '/src/kpi_value_api/.gitlab-ci.yml' - #- local: '/src/kpi_value_writer/.gitlab-ci.yml' - #- local: '/src/telemetry/.gitlab-ci.yml' - - local: '/src/analytics/.gitlab-ci.yml' - - local: '/src/qos_profile/.gitlab-ci.yml' - - local: '/src/vnt_manager/.gitlab-ci.yml' - - local: '/src/e2e_orchestrator/.gitlab-ci.yml' - - local: '/src/ztp_server/.gitlab-ci.yml' - - local: '/src/osm_client/.gitlab-ci.yml' - - # This should be last one: end-to-end integration tests - - local: '/src/tests/.gitlab-ci.yml' +# #- local: '/manifests/.gitlab-ci.yml' +# - local: '/src/monitoring/.gitlab-ci.yml' +# - local: '/src/nbi/.gitlab-ci.yml' +# - local: 
'/src/context/.gitlab-ci.yml' +# - local: '/src/device/.gitlab-ci.yml' +# - local: '/src/service/.gitlab-ci.yml' +# - local: '/src/qkd_app/.gitlab-ci.yml' +# - local: '/src/dbscanserving/.gitlab-ci.yml' +# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' +# - local: '/src/opticalattackdetector/.gitlab-ci.yml' +# - local: '/src/opticalattackmanager/.gitlab-ci.yml' +# - local: '/src/opticalcontroller/.gitlab-ci.yml' +# - local: '/src/ztp/.gitlab-ci.yml' +# - local: '/src/policy/.gitlab-ci.yml' +# - local: '/src/automation/.gitlab-ci.yml' +# - local: '/src/forecaster/.gitlab-ci.yml' +# #- local: '/src/webui/.gitlab-ci.yml' +# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' +# - local: '/src/slice/.gitlab-ci.yml' +# #- local: '/src/interdomain/.gitlab-ci.yml' +# - local: '/src/pathcomp/.gitlab-ci.yml' +# #- local: '/src/dlt/.gitlab-ci.yml' +# - local: '/src/load_generator/.gitlab-ci.yml' +# - local: '/src/bgpls_speaker/.gitlab-ci.yml' +# - local: '/src/kpi_manager/.gitlab-ci.yml' +# - local: '/src/kpi_value_api/.gitlab-ci.yml' +# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' +# #- local: '/src/telemetry/.gitlab-ci.yml' +# - local: '/src/analytics/.gitlab-ci.yml' +# - local: '/src/qos_profile/.gitlab-ci.yml' +# - local: '/src/vnt_manager/.gitlab-ci.yml' +# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' +# - local: '/src/ztp_server/.gitlab-ci.yml' +# - local: '/src/osm_client/.gitlab-ci.yml' +# +# # This should be last one: end-to-end integration tests +# - local: '/src/tests/.gitlab-ci.yml' -- GitLab From de93ac7a8bcd95e383759d980616213d39c42a00 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 24 Jul 2025 17:19:34 +0000 Subject: [PATCH 004/367] ECOC F5GA Telemetry Demo: - Updated IP controller topology descriptor --- .../data/topology-ip.json | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json index bf2507ce8..28758674d 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json @@ -13,9 +13,9 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "200", "name": "200", "type": "ethernet"}, - {"uuid": "500", "name": "500", "type": "ethernet"}, - {"uuid": "501", "name": "501", "type": "ethernet"} + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]} }, @@ -26,8 +26,8 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "500", "name": "500", "type": "ethernet"}, - {"uuid": "501", "name": "501", "type": "ethernet"} + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]} }, @@ -38,8 +38,8 @@ {"action": "CONFIGACTION_SET", 
"custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "500", "name": "500", "type": "ethernet"}, - {"uuid": "501", "name": "501", "type": "ethernet"} + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]} }, @@ -50,9 +50,9 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "200", "name": "200", "type": "ethernet"}, - {"uuid": "500", "name": "500", "type": "ethernet"}, - {"uuid": "501", "name": "501", "type": "ethernet"} + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]} } -- GitLab From 334ac734401d3ac84d736820cea63d96aef9dee9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 24 Jul 2025 17:20:28 +0000 Subject: [PATCH 005/367] ECOC F5GA Telemetry Demo: - Added Agg controller topology descriptor - Added Agg controller deploy and specs scripts - Added E2E controller deploy and specs scripts --- .../data/topology-agg.json | 94 ++++++++ .../ecoc25-f5ga-telemetry/deploy-specs-agg.sh | 217 ++++++++++++++++++ .../ecoc25-f5ga-telemetry/deploy-specs-e2e.sh | 217 ++++++++++++++++++ .../ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh | 17 ++ .../ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh | 17 ++ 5 files changed, 562 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh create mode 100644 src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json new file mode 100644 index 000000000..d48c200c9 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -0,0 +1,94 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "name": "TFS-IP", "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.208"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "name": "NCE-T", "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ + {"action": 
"CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "name": "172.16.58.10", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-501"}}, "name": "172.16.58.10-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.125.25-200"}}, "name": "172.16.125.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-500"}}, "name": "172.16.58.10-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.182.25-200"}}, "name": "172.16.182.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + 
{"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500a"}}, "name": "172.16.204.22x-500a", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500b"}}, "name": "172.16.204.22x-500b", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}} + ]} + ] +} diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh new file mode 100644 index 000000000..0820e21b7 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh @@ -0,0 +1,217 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. 
+export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh new file mode 100644 index 000000000..0820e21b7 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh @@ -0,0 +1,217 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. 
+export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. 
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh new file mode 100755 index 000000000..c407b45ac --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh +./deploy/all.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh new file mode 100755 index 000000000..256bc4232 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh +./deploy/all.sh -- GitLab From 3a3ea35905487cbd2b11af236150f7cbbd2c65a0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 25 Jul 2025 10:24:09 +0000 Subject: [PATCH 006/367] ECOC F5GA Telemetry Demo: - Generalized deploy scripts to select deploy specs by hostname --- .../ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh | 17 ----------------- .../ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh | 17 ----------------- .../{redeploy-tfs-agg.sh => redeploy-tfs.sh} | 6 +++++- 3 files changed, 5 insertions(+), 35 deletions(-) delete mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh delete mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh rename src/tests/ecoc25-f5ga-telemetry/{redeploy-tfs-agg.sh => redeploy-tfs.sh} (73%) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh deleted file mode 100755 index 256bc4232..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-e2e.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh -./deploy/all.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh deleted file mode 100755 index 32edf24d1..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-ip.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh -./deploy/all.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh similarity index 73% rename from src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh rename to src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh index c407b45ac..d0dd29957 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs-agg.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh @@ -13,5 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh +# assuming the instances are named as e2e-sdn-ctrl, agg-sdn-ctrl, and ip-sdn-ctrl +CTRL_NAME=$(hostname | cut -d'-' -f1) +echo "Deploying: ${CTRL_NAME}" + +source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-${CTRL_NAME}.sh ./deploy/all.sh -- GitLab From bb31519024c965f2f4771876bc4f2ad1f2f6f754 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 25 Jul 2025 11:21:05 +0000 Subject: [PATCH 007/367] ECOC F5GA Telemetry Demo: - Updated topology descriptors --- .../data/topology-agg.json | 2 +- .../data/topology-e2e.json | 118 ++++++++++++++++++ 2 files changed, 119 insertions(+), 1 deletion(-) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index d48c200c9..6076408f9 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -6,7 +6,7 @@ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} ], "devices": [ - {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "name": "TFS-IP", "device_type": "ip-sdn-controller", + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "name": "TFS-IP", "device_type": "teraflowsdn", "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.208"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json new file mode 100644 index 000000000..9f3eeee18 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -0,0 +1,118 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "name": "TFS-AGG", "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "name": "NCE-FAN", "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, 
"device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-501"}}, "name": "172.16.58.10-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.125.25-200"}}, "name": "172.16.125.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-500"}}, "name": "172.16.58.10-500", + "attributes": 
{"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.182.25-200"}}, "name": "172.16.182.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500a"}}, "name": "172.16.204.22x-500a", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500b"}}, "name": "172.16.204.22x-500b", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}} + ]} + ] +} -- GitLab From 619fe75fd595e4c5994e802a01c4c10b170c99c5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 28 Jul 2025 14:20:28 +0000 Subject: [PATCH 008/367] Deploy scripts: - Added build-only.sh script - Temporarily adapted deployment to static local repository --- deploy/build-only.sh | 143 +++++++++++++++++++++++++ manifests/cockroachdb/single-node.yaml | 2 +- manifests/kafka/single-node.yaml | 2 +- manifests/questdb/manifest.yaml | 2 +- src/context/Dockerfile | 2 +- src/device/Dockerfile | 2 +- src/nbi/Dockerfile | 2 +- src/pathcomp/backend/Dockerfile | 4 +- src/pathcomp/frontend/Dockerfile | 2 +- src/service/Dockerfile | 2 +- src/webui/Dockerfile | 2 +- 11 files changed, 154 insertions(+), 11 deletions(-) create mode 100755 deploy/build-only.sh diff --git a/deploy/build-only.sh b/deploy/build-only.sh new file mode 100755 index 000000000..5db9e6e5d --- /dev/null +++ b/deploy/build-only.sh @@ -0,0 +1,143 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +######################################################################################################################## +# Read deployment settings +######################################################################################################################## + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# If not already set, set the URL of the Docker registry where the images will be uploaded to. +# By default, assume internal MicroK8s registry is used. +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:32000/tfs/"} + +# If not already set, set the list of components, separated by spaces, you want to build images for, and deploy. +# By default, only basic components are deployed +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui load_generator"} + +# If not already set, set the tag you want to use for your images. +export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"dev"} + + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +# Create a tmp folder for files modified during the deployment +TMP_LOGS_FOLDER="./tmp/build" +mkdir -p $TMP_LOGS_FOLDER + +DOCKER_BUILD="docker build" +DOCKER_MAJOR_VERSION=$(docker --version | grep -o -E "Docker version [0-9]+\." | grep -o -E "[0-9]+" | cut -c 1-3) +if [[ $DOCKER_MAJOR_VERSION -ge 23 ]]; then + # If Docker version >= 23, build command was migrated to docker-buildx + # In Ubuntu, in practice, means to install package docker-buildx together with docker.io + # Check if docker-buildx plugin is installed + docker buildx version 1>/dev/null 2>/dev/null + if [[ $? -ne 0 ]]; then + echo "Docker buildx command is not installed. Check: https://docs.docker.com/build/architecture/#install-buildx" + echo "If you installed docker through APT package docker.io, consider installing also package docker-buildx" + exit 1; + fi + DOCKER_BUILD="docker buildx build" +fi + +for COMPONENT in $TFS_COMPONENTS; do + echo "Processing '$COMPONENT' component..." + + echo " Building Docker image..." + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "ztp" ] || [ "$COMPONENT" == "policy" ]; then + $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + elif [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-frontend.log" + $DOCKER_BUILD -t "$COMPONENT-frontend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/frontend/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-backend.log" + $DOCKER_BUILD -t "$COMPONENT-backend:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/backend/Dockerfile . 
> "$BUILD_LOG" + if [ "$COMPONENT" == "pathcomp" ]; then + # next command is redundant, but helpful to keep cache updated between rebuilds + IMAGE_NAME="$COMPONENT-backend:$TFS_IMAGE_TAG-builder" + $DOCKER_BUILD -t "$IMAGE_NAME" --target builder -f ./src/"$COMPONENT"/backend/Dockerfile . >> "$BUILD_LOG" + fi + elif [ "$COMPONENT" == "dlt" ]; then + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-connector.log" + $DOCKER_BUILD -t "$COMPONENT-connector:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/connector/Dockerfile . > "$BUILD_LOG" + + BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}-gateway.log" + $DOCKER_BUILD -t "$COMPONENT-gateway:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/gateway/Dockerfile . > "$BUILD_LOG" + else + $DOCKER_BUILD -t "$COMPONENT:$TFS_IMAGE_TAG" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" + fi + + echo " Pushing Docker image to '$TFS_REGISTRY_IMAGES'..." + + if [ "$COMPONENT" == "pathcomp" ] || [ "$COMPONENT" == "telemetry" ] || [ "$COMPONENT" == "analytics" ] ; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-frontend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-frontend.log" + docker tag "$COMPONENT-frontend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-frontend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-backend:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-backend.log" + docker tag "$COMPONENT-backend:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-backend.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + elif [ "$COMPONENT" == "dlt" ]; then + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-connector:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-connector.log" + docker tag "$COMPONENT-connector:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-connector.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT-gateway:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}-gateway.log" + docker tag "$COMPONENT-gateway:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}-gateway.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + else + IMAGE_URL=$(echo "$TFS_REGISTRY_IMAGES/$COMPONENT:$TFS_IMAGE_TAG" | sed 's,//,/,g' | sed 's,http:/,,g') + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$COMPONENT:$TFS_IMAGE_TAG" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi + + printf "\n" +done + +echo "Pruning Docker Images..." +docker image prune --force +printf "\n\n" + +if [ "$DOCKER_BUILD" == "docker buildx build" ]; then + echo "Pruning Docker Buildx Cache..." 
+ docker buildx prune --force + printf "\n\n" +fi diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml index ed297d77c..2bc3d22d1 100644 --- a/manifests/cockroachdb/single-node.yaml +++ b/manifests/cockroachdb/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: cockroachdb - image: cockroachdb/cockroach:latest-v22.2 + image: http://10.254.6.194:5000/cockroachdb/cockroach:latest-v22.2 imagePullPolicy: IfNotPresent args: - start-single-node diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index 4c435c11b..7ac626907 100644 --- a/manifests/kafka/single-node.yaml +++ b/manifests/kafka/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: kafka - image: bitnami/kafka:latest + image: http://10.254.6.194:5000/bitnami/kafka:latest imagePullPolicy: IfNotPresent ports: - name: clients diff --git a/manifests/questdb/manifest.yaml b/manifests/questdb/manifest.yaml index 268e53ff9..0454b1311 100644 --- a/manifests/questdb/manifest.yaml +++ b/manifests/questdb/manifest.yaml @@ -31,7 +31,7 @@ spec: restartPolicy: Always containers: - name: metricsdb - image: questdb/questdb + image: http://10.254.6.194:5000/questdb/questdb:latest ports: - containerPort: 9000 - containerPort: 9009 diff --git a/src/context/Dockerfile b/src/context/Dockerfile index a4bf84153..5eea21e5e 100644 --- a/src/context/Dockerfile +++ b/src/context/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM python:3.9-slim +FROM 10.254.6.194:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/device/Dockerfile b/src/device/Dockerfile index d85419128..f9d87e30e 100644 --- a/src/device/Dockerfile +++ b/src/device/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM python:3.9-slim +FROM 10.254.6.194:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile index 63556432b..7dcd5e4b5 100644 --- a/src/nbi/Dockerfile +++ b/src/nbi/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM python:3.9-slim +FROM 10.254.6.194:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/pathcomp/backend/Dockerfile b/src/pathcomp/backend/Dockerfile index 2c447397d..7350fbe77 100644 --- a/src/pathcomp/backend/Dockerfile +++ b/src/pathcomp/backend/Dockerfile @@ -15,7 +15,7 @@ # Multi-stage Docker image build # Stage 1 -FROM ubuntu:20.04 AS builder +FROM 10.254.6.194:5000/ubuntu:20.04 AS builder ARG DEBIAN_FRONTEND=noninteractive # Install build software @@ -53,7 +53,7 @@ ENTRYPOINT [ "./pathComp-cvr", "config/pathcomp.conf", "screen_only" ] # Stage 2 -FROM ubuntu:20.04 AS release +FROM 10.254.6.194:5000/ubuntu:20.04 AS release ARG DEBIAN_FRONTEND=noninteractive # Install build software diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile index 2f6d5a3bd..a9350eb34 100644 --- a/src/pathcomp/frontend/Dockerfile +++ b/src/pathcomp/frontend/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM python:3.9-slim +FROM 10.254.6.194:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/service/Dockerfile b/src/service/Dockerfile index 49efe9829..5bd28078b 100644 --- a/src/service/Dockerfile +++ b/src/service/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM python:3.9-slim +FROM 10.254.6.194:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile index 167280d68..07aec0eb1 100644 --- a/src/webui/Dockerfile +++ b/src/webui/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM python:3.9-slim +FROM 10.254.6.194:5000/python:3.9-slim # Ref: https://pythonspeed.com/articles/activate-virtualenv-dockerfile/ -- GitLab From 0b8a9a4107fbbc6bc4f3afcc1e6f666364d8308f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 28 Jul 2025 14:31:39 +0000 Subject: [PATCH 009/367] ECOC F5GA Telemetry Demo: - Added build-only script --- .../ecoc25-f5ga-telemetry/rebuild-tfs.sh | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh new file mode 100644 index 000000000..e16b836b8 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:5000/tfs/"} +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service nbi webui"} +export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"f5ga"} + +./deploy/build-only.sh -- GitLab From cf93738a7ea99ab2428136bfe81ba008b9d07664 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 28 Jul 2025 14:33:59 +0000 Subject: [PATCH 010/367] ECOC F5GA Telemetry Demo: - Made build-only script executable --- src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh old mode 100644 new mode 100755 -- GitLab From 34d9bb6b2a56bcb4cff675012b34664a3cb63532 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 28 Jul 2025 14:39:54 +0000 Subject: [PATCH 011/367] ECOC F5GA Telemetry Demo: - Corrected docker image names --- manifests/cockroachdb/single-node.yaml | 2 +- manifests/kafka/single-node.yaml | 2 +- manifests/questdb/manifest.yaml | 2 +- src/context/Dockerfile | 2 +- src/device/Dockerfile | 2 +- src/nbi/Dockerfile | 2 +- src/pathcomp/backend/Dockerfile | 4 ++-- src/pathcomp/frontend/Dockerfile | 2 +- src/service/Dockerfile | 2 +- src/webui/Dockerfile | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml index 2bc3d22d1..f6534dc3e 100644 --- a/manifests/cockroachdb/single-node.yaml +++ b/manifests/cockroachdb/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: cockroachdb - image: http://10.254.6.194:5000/cockroachdb/cockroach:latest-v22.2 + image: localhost:5000/cockroachdb/cockroach:latest-v22.2 imagePullPolicy: IfNotPresent args: - start-single-node diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index 7ac626907..14e12f85d 100644 --- a/manifests/kafka/single-node.yaml +++ b/manifests/kafka/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: kafka - image: http://10.254.6.194:5000/bitnami/kafka:latest + image: localhost:5000/bitnami/kafka:latest imagePullPolicy: IfNotPresent ports: - name: clients diff --git a/manifests/questdb/manifest.yaml b/manifests/questdb/manifest.yaml index 0454b1311..b40814b81 100644 --- a/manifests/questdb/manifest.yaml +++ b/manifests/questdb/manifest.yaml @@ -31,7 +31,7 @@ spec: restartPolicy: Always containers: - name: metricsdb - image: http://10.254.6.194:5000/questdb/questdb:latest + image: localhost:5000/questdb/questdb:latest ports: - containerPort: 9000 - containerPort: 9009 diff --git a/src/context/Dockerfile b/src/context/Dockerfile index 5eea21e5e..92b208af9 100644 --- a/src/context/Dockerfile +++ b/src/context/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM 10.254.6.194:5000/python:3.9-slim +FROM localhost:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/device/Dockerfile b/src/device/Dockerfile index f9d87e30e..8dfc9a4ad 100644 --- a/src/device/Dockerfile +++ b/src/device/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM 10.254.6.194:5000/python:3.9-slim +FROM localhost:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile index 7dcd5e4b5..3609034d3 100644 --- a/src/nbi/Dockerfile +++ b/src/nbi/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM 10.254.6.194:5000/python:3.9-slim +FROM localhost:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/pathcomp/backend/Dockerfile b/src/pathcomp/backend/Dockerfile index 7350fbe77..468b0d852 100644 --- a/src/pathcomp/backend/Dockerfile +++ b/src/pathcomp/backend/Dockerfile @@ -15,7 +15,7 @@ # Multi-stage Docker image build # Stage 1 -FROM 10.254.6.194:5000/ubuntu:20.04 AS builder +FROM localhost:5000/ubuntu:20.04 AS builder ARG DEBIAN_FRONTEND=noninteractive # Install build software @@ -53,7 +53,7 @@ ENTRYPOINT [ "./pathComp-cvr", "config/pathcomp.conf", "screen_only" ] # Stage 2 -FROM 10.254.6.194:5000/ubuntu:20.04 AS release +FROM localhost:5000/ubuntu:20.04 AS release ARG DEBIAN_FRONTEND=noninteractive # Install build software diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile index a9350eb34..2647b14e9 100644 --- a/src/pathcomp/frontend/Dockerfile +++ b/src/pathcomp/frontend/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM 10.254.6.194:5000/python:3.9-slim +FROM localhost:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/service/Dockerfile b/src/service/Dockerfile index 5bd28078b..9b6254465 100644 --- a/src/service/Dockerfile +++ b/src/service/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM 10.254.6.194:5000/python:3.9-slim +FROM localhost:5000/python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile index 07aec0eb1..1635b8fd0 100644 --- a/src/webui/Dockerfile +++ b/src/webui/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM 10.254.6.194:5000/python:3.9-slim +FROM localhost:5000/python:3.9-slim # Ref: https://pythonspeed.com/articles/activate-virtualenv-dockerfile/ -- GitLab From fac27205cb8915c8fa49e5fe39b7ec2c6026f4bf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 28 Jul 2025 15:49:20 +0000 Subject: [PATCH 012/367] ECOC F5GA Telemetry Demo: - Corrected docker image repositories - Corrected --- manifests/cockroachdb/single-node.yaml | 2 +- manifests/kafka/single-node.yaml | 2 +- manifests/nats/cluster.yaml | 2 ++ manifests/questdb/manifest.yaml | 2 +- src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh | 6 +++--- src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh | 6 +++--- src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh | 6 +++--- src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh | 2 +- 8 files changed, 15 insertions(+), 13 deletions(-) diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml index f6534dc3e..49b12c7f2 100644 --- a/manifests/cockroachdb/single-node.yaml +++ b/manifests/cockroachdb/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: cockroachdb - image: localhost:5000/cockroachdb/cockroach:latest-v22.2 + image: 10.254.6.194:5000/cockroachdb/cockroach:latest-v22.2 imagePullPolicy: IfNotPresent args: - start-single-node diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index 14e12f85d..ee7a7f6d3 100644 --- a/manifests/kafka/single-node.yaml +++ b/manifests/kafka/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: kafka - image: localhost:5000/bitnami/kafka:latest + image: 10.254.6.194:5000/bitnami/kafka:latest imagePullPolicy: IfNotPresent ports: - name: clients diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml index 40ce28fd7..d2951ba58 100644 --- a/manifests/nats/cluster.yaml +++ b/manifests/nats/cluster.yaml @@ -14,6 +14,8 @@ container: image: + registry: 10.254.6.194:5000 + repository: nats tags: 2.9-alpine env: # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB diff --git a/manifests/questdb/manifest.yaml b/manifests/questdb/manifest.yaml index b40814b81..8a9e0d99b 100644 --- a/manifests/questdb/manifest.yaml +++ b/manifests/questdb/manifest.yaml @@ -31,7 +31,7 @@ spec: restartPolicy: Always containers: - name: metricsdb - image: localhost:5000/questdb/questdb:latest + image: 10.254.6.194:5000/questdb/questdb:latest ports: - containerPort: 9000 - containerPort: 9009 diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh index 0820e21b7..c691ce745 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh @@ -17,7 +17,7 @@ # ----- TeraFlowSDN ------------------------------------------------------------ # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" +export TFS_REGISTRY_IMAGES="http://10.254.6.194:5000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. export TFS_COMPONENTS="context device pathcomp service slice nbi webui" @@ -91,7 +91,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # Set the tag you want to use for your images. -export TFS_IMAGE_TAG="dev" +export TFS_IMAGE_TAG="f5ga" # Set the name of the Kubernetes namespace to deploy TFS to. 
export TFS_K8S_NAMESPACE="tfs" @@ -109,7 +109,7 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. -export TFS_SKIP_BUILD="" +export TFS_SKIP_BUILD="YES" # ----- CockroachDB ------------------------------------------------------------ diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh index 0820e21b7..c691ce745 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh @@ -17,7 +17,7 @@ # ----- TeraFlowSDN ------------------------------------------------------------ # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" +export TFS_REGISTRY_IMAGES="http://10.254.6.194:5000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. export TFS_COMPONENTS="context device pathcomp service slice nbi webui" @@ -91,7 +91,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # Set the tag you want to use for your images. -export TFS_IMAGE_TAG="dev" +export TFS_IMAGE_TAG="f5ga" # Set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE="tfs" @@ -109,7 +109,7 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. -export TFS_SKIP_BUILD="" +export TFS_SKIP_BUILD="YES" # ----- CockroachDB ------------------------------------------------------------ diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh index 0820e21b7..c691ce745 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh @@ -17,7 +17,7 @@ # ----- TeraFlowSDN ------------------------------------------------------------ # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" +export TFS_REGISTRY_IMAGES="http://10.254.6.194:5000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. export TFS_COMPONENTS="context device pathcomp service slice nbi webui" @@ -91,7 +91,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # Set the tag you want to use for your images. -export TFS_IMAGE_TAG="dev" +export TFS_IMAGE_TAG="f5ga" # Set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE="tfs" @@ -109,7 +109,7 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. 
-export TFS_SKIP_BUILD="" +export TFS_SKIP_BUILD="YES" # ----- CockroachDB ------------------------------------------------------------ diff --git a/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh index e16b836b8..f8ad1abce 100755 --- a/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh +++ b/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh @@ -15,7 +15,7 @@ export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:5000/tfs/"} -export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service nbi webui"} +export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui"} export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"f5ga"} ./deploy/build-only.sh -- GitLab From e66ac057f89b8ea299d207a21776812f7ad8a45b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 31 Jul 2025 19:36:25 +0000 Subject: [PATCH 013/367] Tests - Tools - SIMAP Server - Implemented first functional version of a minimalistic SIMAP server --- src/tests/.gitlab-ci.yml | 1 + src/tests/tools/simap_server/.gitlab-ci.yml | 41 ++ src/tests/tools/simap_server/Dockerfile | 34 ++ src/tests/tools/simap_server/README.md | 34 ++ src/tests/tools/simap_server/build.sh | 21 + src/tests/tools/simap_server/deploy.sh | 27 ++ src/tests/tools/simap_server/destroy.sh | 22 + src/tests/tools/simap_server/redeploy.sh | 32 ++ src/tests/tools/simap_server/run_client.sh | 3 + src/tests/tools/simap_server/run_server.sh | 3 + .../tools/simap_server/simap-created.json | 203 ++++++++ .../simap_client/RestConfClient.py | 191 ++++++++ .../simap_server/simap_client/SimapClient.py | 191 ++++++++ .../simap_server/simap_client/__init__.py | 14 + .../simap_server/simap_client/__main__.py | 128 +++++ .../tools/simap_server/simap_client/tests.py | 50 ++ .../simap_server/simap_server/Dispatch.py | 148 ++++++ .../simap_server/simap_server/HostMeta.py | 50 ++ .../simap_server/HttpStatusCodesEnum.py | 27 ++ .../simap_server/simap_server/YangHandler.py | 131 +++++ .../simap_server/simap_server/__init__.py | 14 + .../simap_server/simap_server/__main__.py | 70 +++ .../simap_server/yang/ietf-inet-types.yang | 458 ++++++++++++++++++ .../yang/ietf-network-topology.yang | 291 +++++++++++ .../simap_server/yang/ietf-network.yang | 193 ++++++++ .../simap_server/yang/simap-telemetry.yang | 81 ++++ .../simap_server/simap_server/yang/simap.txt | 10 + src/tests/tools/simap_server/tests.sh | 66 +++ .../tools/simap_server/tests/Dispatch_old.py | 261 ++++++++++ .../simap_server/tests/create_get_object.py | 59 +++ .../simap_server/tests/libyang_examples.py | 162 +++++++ .../simap_server/tests/test_path_to_json.py | 23 + src/tests/tools/simap_server/tests/tests.sh | 15 + .../tools/simap_server/tests/walk_module.py | 45 ++ 34 files changed, 3099 insertions(+) create mode 100644 src/tests/tools/simap_server/.gitlab-ci.yml create mode 100644 src/tests/tools/simap_server/Dockerfile create mode 100644 src/tests/tools/simap_server/README.md create mode 100755 src/tests/tools/simap_server/build.sh create mode 100755 src/tests/tools/simap_server/deploy.sh create mode 100755 src/tests/tools/simap_server/destroy.sh create mode 100755 src/tests/tools/simap_server/redeploy.sh create mode 100755 src/tests/tools/simap_server/run_client.sh create mode 100755 src/tests/tools/simap_server/run_server.sh create mode 100644 src/tests/tools/simap_server/simap-created.json create mode 100644 src/tests/tools/simap_server/simap_client/RestConfClient.py create mode 100644 src/tests/tools/simap_server/simap_client/SimapClient.py create mode 
100644 src/tests/tools/simap_server/simap_client/__init__.py create mode 100644 src/tests/tools/simap_server/simap_client/__main__.py create mode 100644 src/tests/tools/simap_server/simap_client/tests.py create mode 100644 src/tests/tools/simap_server/simap_server/Dispatch.py create mode 100644 src/tests/tools/simap_server/simap_server/HostMeta.py create mode 100644 src/tests/tools/simap_server/simap_server/HttpStatusCodesEnum.py create mode 100644 src/tests/tools/simap_server/simap_server/YangHandler.py create mode 100644 src/tests/tools/simap_server/simap_server/__init__.py create mode 100644 src/tests/tools/simap_server/simap_server/__main__.py create mode 100644 src/tests/tools/simap_server/simap_server/yang/ietf-inet-types.yang create mode 100644 src/tests/tools/simap_server/simap_server/yang/ietf-network-topology.yang create mode 100644 src/tests/tools/simap_server/simap_server/yang/ietf-network.yang create mode 100644 src/tests/tools/simap_server/simap_server/yang/simap-telemetry.yang create mode 100644 src/tests/tools/simap_server/simap_server/yang/simap.txt create mode 100755 src/tests/tools/simap_server/tests.sh create mode 100644 src/tests/tools/simap_server/tests/Dispatch_old.py create mode 100644 src/tests/tools/simap_server/tests/create_get_object.py create mode 100644 src/tests/tools/simap_server/tests/libyang_examples.py create mode 100644 src/tests/tools/simap_server/tests/test_path_to_json.py create mode 100755 src/tests/tools/simap_server/tests/tests.sh create mode 100644 src/tests/tools/simap_server/tests/walk_module.py diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 67f3b5692..9b256f1ae 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -30,3 +30,4 @@ include: - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' + - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' diff --git a/src/tests/tools/simap_server/.gitlab-ci.yml b/src/tests/tools/simap_server/.gitlab-ci.yml new file mode 100644 index 000000000..2e1652765 --- /dev/null +++ b/src/tests/tools/simap_server/.gitlab-ci.yml @@ -0,0 +1,41 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# Build, tag, and push the Docker image to the GitLab Docker registry
+build simap_server:
+  stage: build
+  before_script:
+    - docker image prune --force
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker buildx build -t "$CI_REGISTRY_IMAGE/simap-server:test" -f ./src/tests/tools/simap_server/Dockerfile ./src/tests/tools/simap_server
+    - docker push "$CI_REGISTRY_IMAGE/simap-server:test"
+  after_script:
+    - docker image prune --force
+  rules:
+    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)'
+    - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"'
+    - changes:
+      - src/common/**/*.py
+      - proto/*.proto
+      - src/tests/tools/simap_server/**/*.{py,in,yml,yaml,yang,sh,json}
+      - src/tests/tools/simap_server/Dockerfile
+      - src/tests/.gitlab-ci.yml
+      #- src/device/**/*.{py,in,yml}
+      #- src/device/Dockerfile
+      #- src/device/tests/*.py
+      #- src/qkd_app/**/*.{py,in,yml}
+      #- src/qkd_app/Dockerfile
+      #- src/qkd_app/tests/*.py
+      - .gitlab-ci.yml
diff --git a/src/tests/tools/simap_server/Dockerfile b/src/tests/tools/simap_server/Dockerfile
new file mode 100644
index 000000000..3ad6626af
--- /dev/null
+++ b/src/tests/tools/simap_server/Dockerfile
@@ -0,0 +1,34 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+# Set Python to show logs as they occur
+ENV PYTHONUNBUFFERED=0
+
+# Get Python dependencies
+RUN python3 -m pip install --upgrade pip
+RUN python3 -m pip install --upgrade setuptools wheel
+RUN python3 -m pip install https://github.com/freeconf/lang/releases/download/v0.1.0-alpha/freeconf-0.1.0-py3-none-any.whl
+RUN fc-lang-install -v
+
+# Create component sub-folders, and copy content
+RUN mkdir -p /var/simap_server/
+WORKDIR /var/simap_server
+COPY ./yang ./yang
+COPY ./startup.json ./startup.json
+COPY ./*.py .
+
+# Start the service
+ENTRYPOINT ["python", "simap_server.py"]
diff --git a/src/tests/tools/simap_server/README.md b/src/tests/tools/simap_server/README.md
new file mode 100644
index 000000000..7087fe061
--- /dev/null
+++ b/src/tests/tools/simap_server/README.md
@@ -0,0 +1,34 @@
+# Mock QKD Node
+
+This Mock implements very basic support for the software-defined QKD node information models specified in ETSI GS QKD 015 V2.1.1.
+
+The aim of this mock is to enable testing the TFS QKD Framework with an emulated data plane.
+ + +## Build the Mock QKD Node Docker image +```bash +./build.sh +``` + +## Run the Mock QKD Node as a container: +```bash +docker network create --driver bridge --subnet=172.254.252.0/24 --gateway=172.254.252.254 tfs-qkd-net-mgmt + +docker run --name qkd-node-01 --detach --publish 80:80 \ + --network=tfs-qkd-net-mgmt --ip=172.254.252.101 \ + --env "DATA_FILE_PATH=/var/teraflow/mock-qkd-node/data/database.json" \ + --volume "$PWD/src/tests/mock-qkd-node/data/database-01.json:/var/teraflow/mock-qkd-node/data/database.json" \ + mock-qkd-node:test + +docker run --name qkd-node-02 --detach --publish 80:80 \ + --network=tfs-qkd-net-mgmt --ip=172.254.252.102 \ + --env "DATA_FILE_PATH=/var/teraflow/mock-qkd-node/data/database.json" \ + --volume "$PWD/src/tests/mock-qkd-node/data/database-02.json:/var/teraflow/mock-qkd-node/data/database.json" \ + mock-qkd-node:test + +docker run --name qkd-node-03 --detach --publish 80:80 \ + --network=tfs-qkd-net-mgmt --ip=172.254.252.103 \ + --env "DATA_FILE_PATH=/var/teraflow/mock-qkd-node/data/database.json" \ + --volume "$PWD/src/tests/mock-qkd-node/data/database-03.json:/var/teraflow/mock-qkd-node/data/database.json" \ + mock-qkd-node:test +``` diff --git a/src/tests/tools/simap_server/build.sh b/src/tests/tools/simap_server/build.sh new file mode 100755 index 000000000..033570f49 --- /dev/null +++ b/src/tests/tools/simap_server/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +docker buildx build -t simap-server:test -f Dockerfile . +#docker tag simap-server:test localhost:32000/tfs/simap-server:test +#docker push localhost:32000/tfs/simap-server:test diff --git a/src/tests/tools/simap_server/deploy.sh b/src/tests/tools/simap_server/deploy.sh new file mode 100755 index 000000000..f13c56379 --- /dev/null +++ b/src/tests/tools/simap_server/deploy.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force simap-server + +# Create SIMAP Server +docker run --detach --name simap-server --network host simap-server:test + +sleep 2 + +# Dump SIMAP Server Docker container +docker ps -a + +echo "Bye!" 
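For a quick smoke test after `./deploy.sh`, the RESTCONF entry point exposed by the container can be probed directly. The sketch below is illustrative only: it assumes the server answers on 127.0.0.1:8080 (the default port used by the `RestConfClient` introduced later in this patch; adjust if your deployment differs) and that host-meta is returned as JSON, mirroring the discovery performed by `RestConfClient._discover_base_url`.

```python
# Hedged smoke test: host and port are assumptions, not part of the patch itself.
import requests

BASE = 'http://127.0.0.1:8080'
HEADERS = {'accept': 'application/json'}

# Discover the RESTCONF root advertised under /.well-known/host-meta
host_meta = requests.get(f'{BASE}/.well-known/host-meta', headers=HEADERS, timeout=10).json()
restconf_root = host_meta['links'][0]['href']

# Fetch the full ietf-network:networks tree served by the SIMAP server
data_root = (restconf_root + '/data').replace('//', '/')
reply = requests.get(f'{BASE}{data_root}/ietf-network:networks', headers=HEADERS, timeout=10)
reply.raise_for_status()
print(reply.json())
```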
diff --git a/src/tests/tools/simap_server/destroy.sh b/src/tests/tools/simap_server/destroy.sh new file mode 100755 index 000000000..54345573f --- /dev/null +++ b/src/tests/tools/simap_server/destroy.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force simap-server + +# Dump Docker containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/simap_server/redeploy.sh b/src/tests/tools/simap_server/redeploy.sh new file mode 100755 index 000000000..298d23227 --- /dev/null +++ b/src/tests/tools/simap_server/redeploy.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force simap-server + +# Build +cd $(dirname $0) +docker buildx build -t simap-server:test -f Dockerfile . 
+ +# Create SIMAP Server +docker run --detach --name simap-server --network host simap-server:test + +sleep 2 + +# Dump SIMAP Server Docker container +docker ps -a + +# Follow logs +docker logs simap-server --follow diff --git a/src/tests/tools/simap_server/run_client.sh b/src/tests/tools/simap_server/run_client.sh new file mode 100755 index 000000000..5761bb37c --- /dev/null +++ b/src/tests/tools/simap_server/run_client.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python -m simap_client diff --git a/src/tests/tools/simap_server/run_server.sh b/src/tests/tools/simap_server/run_server.sh new file mode 100755 index 000000000..20901314b --- /dev/null +++ b/src/tests/tools/simap_server/run_server.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +python -m simap_server diff --git a/src/tests/tools/simap_server/simap-created.json b/src/tests/tools/simap_server/simap-created.json new file mode 100644 index 000000000..04d08c570 --- /dev/null +++ b/src/tests/tools/simap_server/simap-created.json @@ -0,0 +1,203 @@ +{ + "ietf-network:networks": { + "network": [ + { + "network-id": "te", + "node": [ + {"node-id": "ONT1", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}]}, + {"node-id": "ONT2", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}]}, + {"node-id": "OLT", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "201"}, {"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "PE1", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "P1", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "P2", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "PE2", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "OA", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "OTN1", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "OTN2", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "OE", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, + {"node-id": "POP1", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "201"}, {"tp-id": "500"}]}, + {"node-id": "POP2", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "201"}, {"tp-id": "500"}]} + ], + "ietf-network-topology:link": [ + {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "200"}}, + {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "201"}}, + {"link-id": "L5", "source": {"source-node": "PE1", "source-tp": "500"}, "destination": {"dest-node": "P1", "dest-tp": "500"}}, + {"link-id": "L6", "source": {"source-node": "PE1", "source-tp": "501"}, "destination": {"dest-node": "P2", "dest-tp": "500"}}, + {"link-id": "L9", "source": {"source-node": "P1", "source-tp": "501"}, "destination": {"dest-node": "PE2", "dest-tp": "500"}}, + {"link-id": "L10", "source": {"source-node": "P2", "source-tp": "501"}, "destination": {"dest-node": "PE2", "dest-tp": "501"}}, + {"link-id": "L7", "source": {"source-node": "OA", "source-tp": "500"}, "destination": {"dest-node": "OTN1", "dest-tp": "500"}}, + {"link-id": "L8", "source": 
{"source-node": "OA", "source-tp": "501"}, "destination": {"dest-node": "OTN2", "dest-tp": "500"}}, + {"link-id": "L11", "source": {"source-node": "OTN1", "source-tp": "501"}, "destination": {"dest-node": "OE", "dest-tp": "500"}}, + {"link-id": "L12", "source": {"source-node": "OTN2", "source-tp": "501"}, "destination": {"dest-node": "OE", "dest-tp": "501"}}, + {"link-id": "L3", "source": {"source-node": "OLT", "source-tp": "500"}, "destination": {"dest-node": "PE1", "dest-tp": "200"}}, + {"link-id": "L4", "source": {"source-node": "OLT", "source-tp": "501"}, "destination": {"dest-node": "OA", "dest-tp": "200"}}, + {"link-id": "L13", "source": {"source-node": "PE2", "source-tp": "200"}, "destination": {"dest-node": "POP1", "dest-tp": "500"}}, + {"link-id": "L14", "source": {"source-node": "OE", "source-tp": "200"}, "destination": {"dest-node": "POP2", "dest-tp": "500"}} + ] + }, + { + "network-id": "simap-trans", + "supporting-network": [{"network-ref": "te"}], + "node": [ + {"node-id": "site1", "supporting-node": [{"network-ref": "te", "node-ref": "PE1"}], "ietf-network-topology:termination-point": [ + {"tp-id": "200", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE1", "tp-ref": "200"}]}, + {"tp-id": "500", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE1", "tp-ref": "500"}]}, + {"tp-id": "501", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE1", "tp-ref": "501"}]} + ]}, + {"node-id": "site2", "supporting-node": [{"network-ref": "te", "node-ref": "PE2"}], "ietf-network-topology:termination-point": [ + {"tp-id": "200", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE2", "tp-ref": "200"}]}, + {"tp-id": "500", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE2", "tp-ref": "500"}]}, + {"tp-id": "501", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE2", "tp-ref": "501"}]} + ]} + ], + "ietf-network-topology:link": [ + {"link-id": "Trans-L1", + "source": {"source-node": "site1", "source-tp": "500"}, + "destination": {"dest-node": "site2", "dest-tp": "500"}, + "supporting-link": [ + {"network-ref": "te", "link-ref": "L5"}, + {"network-ref": "te", "link-ref": "L9"} + ] + } + ] + }, + { + "network-id": "simap-aggnet", + "supporting-network": [ + { + "network-ref": "te" + }, + { + "network-ref": "simap-trans" + } + ], + "node": [ + {"node-id": "sdp1", "supporting-node": [ + { + "network-ref": "te", + "node-ref": "OLT" + } + ], + "ietf-network-topology:termination-point": [ + {"tp-id": "200", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "OLT", "tp-ref": "200"}]}, + {"tp-id": "201", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "OLT", "tp-ref": "201"}]}, + {"tp-id": "500", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "OLT", "tp-ref": "500"}]}, + {"tp-id": "501", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "OLT", "tp-ref": "501"}]} + ] + }, + {"node-id": "sdp2", "supporting-node": [ + { + "network-ref": "te", + "node-ref": "POP1" + } + ], + "ietf-network-topology:termination-point": [ + {"tp-id": "200", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "POP1", "tp-ref": "200"}]}, + {"tp-id": "201", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "POP1", "tp-ref": "201"}]}, + {"tp-id": "500", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "POP1", "tp-ref": "500"}]} + ] + } + ], + "ietf-network-topology:link": [ 
+ { + "link-id": "AggNet-L1", + "source": { + "source-node": "sdp1", + "source-tp": "500" + }, + "destination": { + "dest-node": "sdp2", + "dest-tp": "500" + }, + "supporting-link": [ + { + "network-ref": "te", + "link-ref": "L3" + }, + { + "network-ref": "simap-trans", + "link-ref": "Trans-L1" + }, + { + "network-ref": "te", + "link-ref": "L13" + } + ] + } + ] + }, + { + "network-id": "simap-e2e", + "supporting-network": [ + { + "network-ref": "te" + }, + { + "network-ref": "simap-trans" + } + ], + "node": [ + {"node-id": "sdp1", "supporting-node": [ + { + "network-ref": "te", + "node-ref": "ONT1" + } + ], + "ietf-network-topology:termination-point": [ + {"tp-id": "200", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "ONT1", "tp-ref": "200"}]}, + {"tp-id": "500", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "ONT1", "tp-ref": "500"}]} + ] + }, + {"node-id": "sdp2", "supporting-node": [ + { + "network-ref": "te", + "node-ref": "POP1" + } + ], + "ietf-network-topology:termination-point": [ + {"tp-id": "200", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "POP1", "tp-ref": "200"}]}, + {"tp-id": "201", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "POP1", "tp-ref": "201"}]}, + {"tp-id": "500", "supporting-termination-point": [ + {"network-ref": "te", "node-ref": "POP1", "tp-ref": "500"}]} + ] + } + ], + "ietf-network-topology:link": [ + { + "link-id": "E2E-L1", + "source": { + "source-node": "sdp1", + "source-tp": "500" + }, + "destination": { + "dest-node": "sdp2", + "dest-tp": "500" + }, + "supporting-link": [ + { + "network-ref": "te", + "link-ref": "L1" + }, + { + "network-ref": "simap-aggnet", + "link-ref": "AggNet-L1" + } + ] + } + ] + } + ] + }, + "ietf-yang-schema-mount:schema-mounts": {} +} diff --git a/src/tests/tools/simap_server/simap_client/RestConfClient.py b/src/tests/tools/simap_server/simap_client/RestConfClient.py new file mode 100644 index 000000000..b7c057a70 --- /dev/null +++ b/src/tests/tools/simap_server/simap_client/RestConfClient.py @@ -0,0 +1,191 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
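The layering encoded in `simap-created.json` above is recursive: each higher-layer link lists the supporting links it rides on, down to the bottom `te` network. The helper below is not part of the patch; it is a small sketch that walks those `supporting-link` references for one end-to-end link, using only the field names defined by ietf-network-topology.

```python
# Illustrative helper (not part of the patch): expand a link's supporting-link
# references from simap-created.json down to links with no further support.
import json
from typing import Dict, List

def load_networks(path : str) -> Dict[str, dict]:
    with open(path, encoding='utf-8') as f:
        doc = json.load(f)
    return {n['network-id']: n for n in doc['ietf-network:networks']['network']}

def find_link(network : dict, link_id : str) -> dict:
    return next(lnk for lnk in network['ietf-network-topology:link'] if lnk['link-id'] == link_id)

def expand_link(networks : Dict[str, dict], network_id : str, link_id : str) -> List[str]:
    link = find_link(networks[network_id], link_id)
    supporting = link.get('supporting-link', [])
    if not supporting:  # bottom of the stack reached
        return ['{:s}:{:s}'.format(network_id, link_id)]
    expanded : List[str] = []
    for ref in supporting:
        expanded.extend(expand_link(networks, ref['network-ref'], ref['link-ref']))
    return expanded

networks = load_networks('simap-created.json')
print(expand_link(networks, 'simap-e2e', 'E2E-L1'))
# With the document above this prints: ['te:L1', 'te:L3', 'te:L5', 'te:L9', 'te:L13']
```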
+ +import enum, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Any, Dict, Optional, Set + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], # 200 - OK + requests.codes['CREATED' ], # 201 - Created + requests.codes['ACCEPTED' ], # 202 - Accepted + requests.codes['NO_CONTENT'], # 204 - No Content +} + +def compose_basic_auth( + username : Optional[str] = None, password : Optional[str] = None +) -> Optional[HTTPBasicAuth]: + if username is None or password is None: return None + return HTTPBasicAuth(username, password) + +class SchemeEnum(enum.Enum): + HTTP = 'http' + HTTPS = 'https' + +def check_scheme(scheme : str) -> str: + str_scheme = str(scheme).lower() + enm_scheme = SchemeEnum._value2member_map_[str_scheme] + return enm_scheme.value + +HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' +RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' + +class RestConfClient: + def __init__( + self, address : str, port : int = 8080, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, + logger : Optional[logging.Logger] = None + ) -> None: + self._address = address + self._port = int(port) + self._scheme = check_scheme(scheme) + self._auth = compose_basic_auth(username=username, password=password) + self._base_url = '' + self._timeout = int(timeout) + self._verify_certs = verify_certs + self._allow_redirects = allow_redirects + self._logger = logger + + self._discover_base_url() + + def _discover_base_url(self) -> None: + host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) + host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) + + links = host_meta.get('links') + if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') + if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') + if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') + + link = links[0] + if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') + + rel = link.get('rel') + if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') + if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') + if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') + + href = link.get('href') + if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') + if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') + + self._base_url = str(href + '/data').replace('//', '/') + + def _log_msg_request( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + log_level : int = logging.INFO + ) -> str: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + if self._logger is not None: self._logger.log(log_level, msg) + return msg + + def _log_msg_check_reply( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + reply : requests.Response, expected_status_codes : Set[int], + log_level : int = logging.INFO + ) -> str: + msg = 'Reply: {:s}'.format(str(reply.text)) + if self._logger is not None: 
self._logger.log(log_level, msg) + http_status_code = reply.status_code + if http_status_code in expected_status_codes: return msg + MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' + msg = MSG.format( + str(method.value).upper(), str(request_url), str(body), + str(http_status_code), str(reply.text) + ) + self._logger.error(msg) + raise Exception(msg) + + def _do_rest_request( + self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = EXPECTED_STATUS_CODES + ) -> Optional[Any]: + candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) + if endpoint.lower().startswith(candidate_schemes): + request_url = endpoint.lstrip('/') + else: + endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') + request_url = '{:s}://{:s}:{:d}/{:s}'.format( + self._scheme, self._address, self._port, endpoint.lstrip('/') + ) + self._log_msg_request(method, request_url, body) + try: + headers = {'accept': 'application/json'} + reply = requests.request( + method.value, request_url, headers=headers, json=body, + auth=self._auth, verify=self._verify_certs, timeout=self._timeout, + allow_redirects=self._allow_redirects + ) + except Exception as e: + MSG = 'Request failed. method={:s} url={:s} body={:s}' + msg = MSG.format(str(method.value).upper(), request_url, str(body)) + self._logger.exception(msg) + raise Exception(msg) from e + self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) + if reply.content and len(reply.content) > 0: return reply.json() + return None + + def get( + self, endpoint : str, + expected_status_codes : Set[int] = {requests.codes['OK']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.GET, endpoint, + expected_status_codes=expected_status_codes + ) + + def post( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.POST, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def put( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PUT, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def patch( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PATCH, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def delete( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.DELETE, endpoint, body=body, + expected_status_codes=expected_status_codes + ) diff --git a/src/tests/tools/simap_server/simap_client/SimapClient.py b/src/tests/tools/simap_server/simap_client/SimapClient.py new file mode 100644 index 000000000..26713ac5e --- /dev/null +++ b/src/tests/tools/simap_server/simap_client/SimapClient.py @@ -0,0 +1,191 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Dict, List, Tuple +from .RestConfClient import RestConfClient + + +class TerminationPoint: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tp_id = tp_id + + def create(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network-topology:termination-point'][0] + + def delete(self) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + self._restconf_client.delete(endpoint) + +class Node: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tps : Dict[str, TerminationPoint] = dict() + + def termination_points(self) -> List[Dict]: + tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) + return tps['ietf-network-topology:termination-point'].get('termination-point', list()) + + def termination_point(self, tp_id : str) -> TerminationPoint: + _tp = self._tps.get(tp_id) + if _tp is not None: return _tp + _tp = TerminationPoint(self._restconf_client, self._network_id, self._node_id, tp_id) + return self._tps.setdefault(tp_id, _tp) + + def create( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + 
self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network:node'][0] + + def delete(self) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + +class Link: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link : Dict = self._restconf_client.get(endpoint) + return link['ietf-network-topology:link'][0] + + def delete(self) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + +class Network: + ENDPOINT_NO_ID = '/ietf-network:networks' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._nodes : Dict[str, Node] = dict() + self._links : Dict[str, Link] = dict() + + def nodes(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Node.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('node', list()) + + def links(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Link.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('ietf-network-topology:link', list()) + + def node(self, node_id : str) -> Node: + _node = self._nodes.get(node_id) + if _node is not None: return _node + _node = Node(self._restconf_client, self._network_id, node_id) + return self._nodes.setdefault(node_id, _node) + + def link(self, link_id : str) -> Link: + _link = self._links.get(link_id) + if _link is not None: return _link + _link = Link(self._restconf_client, self._network_id, link_id) + return self._links.setdefault(link_id, _link) + + def create(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + networks : Dict = 
self._restconf_client.get(endpoint) + return networks['ietf-network:network'][0] + + def delete(self) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + self._restconf_client.delete(endpoint) + + +class SimapClient: + def __init__(self, restconf_client : RestConfClient) -> None: + self._restconf_client = restconf_client + self._networks : Dict[str, Network] = dict() + + def networks(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Network.ENDPOINT_NO_ID) + return reply['ietf-network:networks'].get('network', list()) + + def network(self, network_id : str) -> Network: + _network = self._networks.get(network_id) + if _network is not None: return _network + _network = Network(self._restconf_client, network_id) + return self._networks.setdefault(network_id, _network) diff --git a/src/tests/tools/simap_server/simap_client/__init__.py b/src/tests/tools/simap_server/simap_client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/simap_server/simap_client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py new file mode 100644 index 000000000..8c7ecd67d --- /dev/null +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -0,0 +1,128 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging +from .RestConfClient import RestConfClient +from .SimapClient import SimapClient + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +logging.getLogger('RestConfClient').setLevel(logging.WARN) + +def main() -> None: + restconf_client = RestConfClient('127.0.0.1', logger=logging.getLogger('RestConfClient')) + simap_client = SimapClient(restconf_client) + + te_topo = simap_client.network('te') + te_topo.create() + + te_topo.node('ONT1').create(termination_point_ids=['200', '500']) + te_topo.node('ONT2').create(termination_point_ids=['200', '500']) + te_topo.node('OLT' ).create(termination_point_ids=['200', '201', '500', '501']) + te_topo.link('L1').create('ONT1', '500', 'OLT', '200') + te_topo.link('L2').create('ONT2', '500', 'OLT', '201') + + te_topo.node('PE1').create(termination_point_ids=['200', '500', '501']) + te_topo.node('P1' ).create(termination_point_ids=['500', '501']) + te_topo.node('P2' ).create(termination_point_ids=['500', '501']) + te_topo.node('PE2').create(termination_point_ids=['200', '500', '501']) + te_topo.link('L5' ).create('PE1', '500', 'P1', '500') + te_topo.link('L6' ).create('PE1', '501', 'P2', '500') + te_topo.link('L9' ).create('P1', '501', 'PE2', '500') + te_topo.link('L10').create('P2', '501', 'PE2', '501') + + te_topo.node('OA' ).create(termination_point_ids=['200', '500', '501']) + te_topo.node('OTN1').create(termination_point_ids=['500', '501']) + te_topo.node('OTN2').create(termination_point_ids=['500', '501']) + te_topo.node('OE' ).create(termination_point_ids=['200', '500', '501']) + te_topo.link('L7' ).create('OA', '500', 'OTN1', '500') + te_topo.link('L8' ).create('OA', '501', 'OTN2', '500') + te_topo.link('L11' ).create('OTN1', '501', 'OE', '500') + te_topo.link('L12' ).create('OTN2', '501', 'OE', '501') + + te_topo.link('L3').create('OLT', '500', 'PE1', '200') + te_topo.link('L4').create('OLT', '501', 'OA', '200') + + te_topo.node('POP1').create(termination_point_ids=['200', '201', '500']) + te_topo.link('L13').create('PE2', '200', 'POP1', '500') + + te_topo.node('POP2').create(termination_point_ids=['200', '201', '500']) + te_topo.link('L14').create('OE', '200', 'POP2', '500') + + + + simap_trans = simap_client.network('simap-trans') + simap_trans.create(supporting_network_ids=['te']) + + site_1 = simap_trans.node('site1') + site_1.create(supporting_node_ids=[('te', 'PE1')]) + site_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE1', '200')]) + site_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE1', '500')]) + site_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE1', '501')]) + + site_2 = simap_trans.node('site2') + site_2.create(supporting_node_ids=[('te', 'PE2')]) + site_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE2', '200')]) + site_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE2', '500')]) + site_2.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE2', '501')]) + + simap_trans.link('Trans-L1').create('site1', '500', 'site2', '500', supporting_link_ids=[('te', 'L5'), ('te', 'L9')]) + + + + + simap_aggnet = simap_client.network('simap-aggnet') + simap_aggnet.create(supporting_network_ids=['te', 'simap-trans']) + + sdp_1 = simap_aggnet.node('sdp1') + sdp_1.create(supporting_node_ids=[('te', 'OLT')]) + sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'OLT', '200')]) + 
sdp_1.termination_point('201').create(supporting_termination_point_ids=[('te', 'OLT', '201')]) + sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'OLT', '500')]) + sdp_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'OLT', '501')]) + + sdp_2 = simap_aggnet.node('sdp2') + sdp_2.create(supporting_node_ids=[('te', 'POP1')]) + sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) + sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) + sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + + simap_aggnet.link('AggNet-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L3'), ('simap-trans', 'Trans-L1'), ('te', 'L13')]) + + + + + simap_e2e = simap_client.network('simap-e2e') + simap_e2e.create(supporting_network_ids=['te', 'simap-trans']) + + sdp_1 = simap_e2e.node('sdp1') + sdp_1.create(supporting_node_ids=[('te', 'ONT1')]) + sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'ONT1', '200')]) + sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'ONT1', '500')]) + + sdp_2 = simap_e2e.node('sdp2') + sdp_2.create(supporting_node_ids=[('te', 'POP1')]) + sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) + sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) + sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + + simap_e2e.link('E2E-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L1'), ('simap-aggnet', 'AggNet-L1')]) + + + print('networks=', simap_client.networks()) + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/simap_server/simap_client/tests.py b/src/tests/tools/simap_server/simap_client/tests.py new file mode 100644 index 000000000..d12770513 --- /dev/null +++ b/src/tests/tools/simap_server/simap_client/tests.py @@ -0,0 +1,50 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
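For reference, the very first node created by the `__main__` script above, `te_topo.node('ONT1').create(termination_point_ids=['200', '500'])`, translates into a single RESTCONF POST whose target and body are composed by `Node.ENDPOINT_ID` and `Node.create()` in `SimapClient.py`. The snippet below only spells that request out as a sketch; URL encoding of the key predicates and status handling are left to `RestConfClient`.

```python
# Sketch of the request produced by:
#   te_topo.node('ONT1').create(termination_point_ids=['200', '500'])
# as composed by Node.ENDPOINT_ID / Node.create() shown earlier in this patch.
endpoint = '/ietf-network:networks/network[network-id="te"]/node[node-id="ONT1"]'
payload = {
    'ietf-network:networks': {
        'network': [{
            'network-id': 'te',
            'node': [{
                'node-id': 'ONT1',
                'ietf-network-topology:termination-point': [
                    {'tp-id': '200'}, {'tp-id': '500'}
                ],
            }],
        }]
    }
}
# RestConfClient.post() prefixes the discovered base URL ('<restconf-root>/data')
# and, by default, expects 201 Created in return.
```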
+
+
+import logging
+from .RestConfClient import RestConfClient
+from .SimapClient import SimapClient
+
+logging.basicConfig(level=logging.INFO)
+LOGGER = logging.getLogger(__name__)
+
+def main() -> None:
+    restconf_client = RestConfClient('127.0.0.1', logger=LOGGER)
+    simap_client = SimapClient(restconf_client)
+
+    simap_client.network('topology').create()
+
+    simap_client.network('topology').node('r1').create()
+    simap_client.network('topology').node('r1').termination_point('100').create()
+    simap_client.network('topology').node('r1').termination_point('101').create()
+    simap_client.network('topology').node('r1').termination_point('102').create()
+
+    simap_client.network('topology').node('r2').create(termination_point_ids=['200', '201', '202'])
+
+    simap_client.network('topology').link('l1').create('r1', '102', 'r2', '201')
+
+    print('networks=', simap_client.networks())
+    print('network[topology].nodes=', simap_client.network('topology').nodes())
+    print('network[topology].node[r2]=', simap_client.network('topology').node('r2').get())
+    print('network[topology].links=', simap_client.network('topology').links())
+    print('network[topology].link[l1]=', simap_client.network('topology').link('l1').get())
+
+    simap_client.network('topology').link('l1').delete()
+    simap_client.network('topology').node('r2').delete()
+    simap_client.network('topology').node('r1').delete()
+    print('networks=', simap_client.networks())
+
+if __name__ == '__main__':
+    main()
diff --git a/src/tests/tools/simap_server/simap_server/Dispatch.py b/src/tests/tools/simap_server/simap_server/Dispatch.py
new file mode 100644
index 000000000..319aa9f7b
--- /dev/null
+++ b/src/tests/tools/simap_server/simap_server/Dispatch.py
@@ -0,0 +1,148 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
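The files that follow implement the server side: `Dispatch.py` maps the RESTCONF verbs onto a `YangHandler`, `HostMeta.py` advertises the RESTCONF root, and `YangHandler.py` keeps the libyang data tree. The actual wiring lives in `simap_server/__main__.py`, which is not included in this excerpt, so the sketch below is only an assumption of how such Flask-RESTful resources are typically registered; the '/restconf' prefix, module list, startup data and port are illustrative, not taken from the patch.

```python
# Hedged sketch of the glue code; the real simap_server/__main__.py is not part
# of this excerpt, so prefix, port, startup data and module list are assumptions.
from flask import Flask
from flask_restful import Api

from simap_server.Dispatch import RestConfDispatch
from simap_server.HostMeta import HostMeta
from simap_server.YangHandler import YangHandler

app = Flask(__name__)
api = Api(app)

# Load the YANG modules shipped under ./yang; here we start from an empty data
# tree, whereas the container image ships a startup.json for this purpose.
yang_handler = YangHandler('yang', ['ietf-network', 'ietf-network-topology'], {})

# Advertise the RESTCONF root for clients doing host-meta discovery
api.add_resource(
    HostMeta, '/.well-known/host-meta',
    resource_class_kwargs={'restconf_prefix': '/restconf'})

# Hand every data path over to the dispatcher, which delegates to the YANG handler
api.add_resource(
    RestConfDispatch, '/restconf/data', '/restconf/data/<path:subpath>',
    resource_class_kwargs={'yang_handler': yang_handler})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=8080)
```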
+
+
+import json, logging
+from flask import Response, abort, jsonify, request
+from flask_restful import Resource
+from .HttpStatusCodesEnum import HttpStatusCodesEnum
+from .YangHandler import YangHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class RestConfDispatch(Resource):
+    def __init__(self, yang_handler : YangHandler) -> None:
+        super().__init__()
+        self._yang_handler = yang_handler
+
+    def get(self, subpath : str = '/') -> Response:
+        data = self._yang_handler.get(subpath)
+        if data is None:
+            abort(
+                HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value,
+                description='Path({:s}) not found'.format(str(subpath))
+            )
+
+        LOGGER.info('[GET] {:s} => {:s}'.format(subpath, str(data)))
+
+        response = jsonify(json.loads(data))
+        response.status_code = HttpStatusCodesEnum.SUCCESS_OK.value
+        return response
+
+    def post(self, subpath : str) -> Response:
+        # TODO: client should not provide identifier of element to be created, add it to subpath
+        try:
+            payload = request.get_json(force=True)
+        except Exception:
+            LOGGER.exception('Invalid JSON')
+            abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON')
+
+        data = self._yang_handler.get(subpath)
+        if data is not None:
+            abort(
+                HttpStatusCodesEnum.CLI_ERR_CONFLICT.value,
+                description='Path({:s}) already exists'.format(str(subpath))
+            )
+
+        try:
+            json_data = self._yang_handler.create(subpath, payload)
+        except Exception as e:
+            LOGGER.exception('Create failed')
+            abort(
+                HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value,
+                description=str(e)
+            )
+
+        LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data)))
+
+        response = jsonify({'status': 'created'})
+        response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED.value
+        return response
+
+    def put(self, subpath : str) -> Response:
+        # NOTE: client should provide identifier of element to be created/replaced
+        try:
+            payload = request.get_json(force=True)
+        except Exception:
+            LOGGER.exception('Invalid JSON')
+            abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON')
+
+        try:
+            json_data = self._yang_handler.update(subpath, payload)
+        except Exception as e:
+            LOGGER.exception('Update failed')
+            abort(
+                HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value,
+                description=str(e)
+            )
+
+        LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data)))
+        updated = False # TODO: compute if create or update
+
+        response = jsonify({'status': (
+            'updated' if updated else 'created'
+        )})
+        response.status_code = (
+            HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value
+            if updated else
+            HttpStatusCodesEnum.SUCCESS_CREATED.value
+        )
+        return response
+
+    def patch(self, subpath : str) -> Response:
+        # NOTE: client should provide identifier of element to be patched
+        try:
+            payload = request.get_json(force=True)
+        except Exception:
+            LOGGER.exception('Invalid JSON')
+            abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON')
+
+        try:
+            json_data = self._yang_handler.update(subpath, payload)
+        except Exception as e:
+            LOGGER.exception('Update failed')
+            abort(
+                HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value,
+                description=str(e)
+            )
+
+        LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data)))
+
+        response = jsonify({'status': 'patched'})
+        response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value
+        return response
+
+    def delete(self, subpath : str) -> Response:
+        # NOTE: client should provide identifier of element to be deleted
+
+        try:
+            deleted_node =
self._yang_handler.delete(subpath) + except Exception as e: + LOGGER.exception('Delete failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[DELETE] {:s} => {:s}'.format(subpath, str(deleted_node))) + + if deleted_node is None: + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, + description='Path({:s}) not found'.format(str(subpath)) + ) + + response = jsonify({}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + return response diff --git a/src/tests/tools/simap_server/simap_server/HostMeta.py b/src/tests/tools/simap_server/simap_server/HostMeta.py new file mode 100644 index 000000000..95ef34b19 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/HostMeta.py @@ -0,0 +1,50 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import xml.etree.ElementTree as ET +from flask import abort, jsonify, make_response, request +from flask_restful import Resource +from .HttpStatusCodesEnum import HttpStatusCodesEnum + +XRD_NS = 'http://docs.oasis-open.org/ns/xri/xrd-1.0' +ET.register_namespace('', XRD_NS) + +class HostMeta(Resource): + def __init__(self, restconf_prefix : str) -> None: + super().__init__() + self._restconf_prefix = restconf_prefix + + def get(self): + best = request.accept_mimetypes.best_match([ + 'application/xrd+xml', 'application/json' + ], default='application/xrd+xml') + + if best == 'application/xrd+xml': + xrd = ET.Element('{{{:s}}}XRD'.format(str(XRD_NS))) + ET.SubElement(xrd, '{{{:s}}}Link'.format(str(XRD_NS)), attrib={ + 'rel': 'restconf', 'href': self._restconf_prefix + }) + xml_string = ET.tostring(xrd, encoding='utf-8', xml_declaration=True).decode() + response = make_response(str(xml_string)) + response.status_code = 200 + response.content_type = best + return response + elif best == 'application/json': + response = jsonify({'links': [{'rel': 'restconf', 'href': self._restconf_prefix}]}) + response.status_code = 200 + response.content_type = best + return response + else: + abort(HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE) diff --git a/src/tests/tools/simap_server/simap_server/HttpStatusCodesEnum.py b/src/tests/tools/simap_server/simap_server/HttpStatusCodesEnum.py new file mode 100644 index 000000000..c44d135c0 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/HttpStatusCodesEnum.py @@ -0,0 +1,27 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import enum + +class HttpStatusCodesEnum(enum.IntEnum): + SUCCESS_OK = 200 + SUCCESS_CREATED = 201 + SUCCESS_ACCEPTED = 202 + SUCCESS_NO_CONTENT = 204 + CLI_ERR_BAD_REQUEST = 400 + CLI_ERR_NOT_FOUND = 404 + CLI_ERR_NOT_ACCEPTABLE = 406 + CLI_ERR_CONFLICT = 409 + SVR_ERR_NOT_IMPLEMENTED = 501 diff --git a/src/tests/tools/simap_server/simap_server/YangHandler.py b/src/tests/tools/simap_server/simap_server/YangHandler.py new file mode 100644 index 000000000..8a30b696d --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/YangHandler.py @@ -0,0 +1,131 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json, libyang, logging +from typing import Dict, List, Optional, Type + +LOGGER = logging.getLogger(__name__) + +def walk_schema(node : libyang.SNode, path : str = '') -> Dict[str, Type]: + schema_paths : Dict[str, Type] = dict() + current_path = f'{path}/{node.name()}' + schema_paths[current_path] = type(node) + for child in node.children(): + if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue + schema_paths.update(walk_schema(child, current_path)) + return schema_paths + +def extract_schema_paths(yang_module : libyang.Module) -> Dict[str, Type]: + schema_paths : Dict[str, Type] = dict() + for node in yang_module.children(): + schema_paths.update(walk_schema(node)) + return schema_paths + +class YangHandler: + def __init__( + self, yang_search_path : str, yang_module_names : List[str], + yang_startup_data : Dict + ) -> None: + self._yang_context = libyang.Context(yang_search_path) + self._loaded_modules = set() + self._yang_module_paths : Dict[str, Type] = dict() + for yang_module_name in yang_module_names: + LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) + yang_module = self._yang_context.load_module(yang_module_name) + yang_module.feature_enable_all() + self._loaded_modules.add(yang_module_name) + self._yang_module_paths.update(extract_schema_paths(yang_module)) + + self._datastore = self._yang_context.parse_data_mem( + json.dumps(yang_startup_data), fmt='json' + ) + + def destroy(self) -> None: + self._yang_context.destroy() + + def get_module_paths(self) -> Dict[str, Type]: + return self._yang_module_paths + + def get(self, path : str) -> Optional[str]: + if not path.startswith('/'): path = '/' + path + data = self._datastore.find_path(path) + if data is None: return None + json_data = data.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def create(self, path : str, payload : Dict) -> str: + if not path.startswith('/'): path = '/' + path + # TODO: client should not provide identifier of element to be created, add it to subpath + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, 
+ validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) + + dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( + path, parent=self._datastore, value=dnode_parsed, update=False + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def update(self, path : str, payload : Dict) -> str: + if not path.startswith('/'): path = '/' + path + # NOTE: client should provide identifier of element to be updated + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) + + dnode = self._yang_context.create_data_path( + path, parent=self._datastore, value=dnode_parsed, update=True + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def delete(self, path : str) -> Optional[str]: + if not path.startswith('/'): path = '/' + path + + # NOTE: client should provide identifier of element to be deleted + + node : libyang.DNode = self._datastore.find_path(path) + if node is None: return None + + LOGGER.info('node = {:s}'.format(str(node))) + json_data = str(node.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + )) + LOGGER.info('json_data = {:s}'.format(json_data)) + + node.unlink() + node.free() + + return json_data diff --git a/src/tests/tools/simap_server/simap_server/__init__.py b/src/tests/tools/simap_server/simap_server/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/simap_server/simap_server/__main__.py b/src/tests/tools/simap_server/simap_server/__main__.py new file mode 100644 index 000000000..b2fbe2628 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/__main__.py @@ -0,0 +1,70 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from flask import Flask +from flask_restful import Api +from .Dispatch import RestConfDispatch +from .HostMeta import HostMeta +from .YangHandler import YangHandler + +RESTCONF_PREFIX = '/restconf' +SECRET_KEY = '28dfce787f4d2dd9e2f7462ce493d3c6da46864d83e67f6b4f4765398c4155ce' +BIND_ADDRESS = '0.0.0.0' +BIND_PORT = 8080 + +YANG_SEARCH_PATH = './simap_server/yang' +YANG_MODULE_NAMES = [ + 'ietf-inet-types', + 'simap-telemetry', + 'ietf-network-topology', + 'ietf-network', +] +YANG_STARTUP_DATA = {} + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +def main() -> None: + yang_handler = YangHandler( + YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA + ) + restconf_paths = yang_handler.get_module_paths() + + app = Flask(__name__) + app.config['SECRET_KEY'] = SECRET_KEY + + api = Api(app) + api.add_resource( + HostMeta, + '/.well-known/host-meta', + resource_class_args=(RESTCONF_PREFIX,) + ) + api.add_resource( + RestConfDispatch, + RESTCONF_PREFIX + '/data', + RESTCONF_PREFIX + '/data/', + RESTCONF_PREFIX + '/data/', + resource_class_args=(yang_handler,) + ) + + LOGGER.info('Available RESTCONF paths:') + for restconf_path in restconf_paths: + LOGGER.info('- {:s}'.format(str(restconf_path))) + + app.run(host=BIND_ADDRESS, port=BIND_PORT) + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/simap_server/simap_server/yang/ietf-inet-types.yang b/src/tests/tools/simap_server/simap_server/yang/ietf-inet-types.yang new file mode 100644 index 000000000..eacefb636 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/yang/ietf-inet-types.yang @@ -0,0 +1,458 @@ +module ietf-inet-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types"; + prefix "inet"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: David Kessens + + + WG Chair: Juergen Schoenwaelder + + + Editor: Juergen Schoenwaelder + "; + + description + "This module contains a collection of generally useful derived + YANG data types for Internet addresses and related things. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. + + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from . + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. + In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). 
An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. + + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. 
The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. 
Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. 
For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. + + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + +} diff --git a/src/tests/tools/simap_server/simap_server/yang/ietf-network-topology.yang b/src/tests/tools/simap_server/simap_server/yang/ietf-network-topology.yang new file mode 100644 index 000000000..3b1114aa0 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/yang/ietf-network-topology.yang @@ -0,0 +1,291 @@ +module ietf-network-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix nt; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Alexander Clemm + + + Editor: Jan Medved + + + Editor: Robert Varga + + + Editor: Nitin Bahadur + + + Editor: Hariharan Ananthakrishnan + + + Editor: Xufeng Liu + "; + + description + "This module defines a common base model for a network topology, + augmenting the base network data model with links to connect + nodes, as well as termination points to terminate links + on nodes. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8345; + see the RFC itself for full legal notices."; + + revision 2018-02-26 { + description + "Initial revision."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. The precise + structure of the link-id will be up to the implementation. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the data model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier -- for example, to indicate the + type of link and/or the type of topology of which the link is + a part."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. The precise + structure of the tp-id will be up to the implementation. + The identifier SHOULD be chosen such that the same termination + point in a real network topology will always be identified + through the same identifier, even if the data model is + instantiated in separate datastores. 
An implementation MAY + choose to capture semantics in the identifier -- for example, + to indicate the type of termination point and/or the type of + node that contains the termination point."; + } + + grouping link-ref { + description + "This grouping can be used to reference a link in a specific + network. Although it is not used in this module, it is + defined here for the convenience of augmenting modules."; + leaf link-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nt:link/nt:link-id"; + require-instance false; + } + description + "A type for an absolute reference to a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nw:network-ref; + } + + grouping tp-ref { + description + "This grouping can be used to reference a termination point + in a specific node. Although it is not used in this module, + it is defined here for the convenience of augmenting + modules."; + leaf tp-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nw:node[nw:node-id=current()/../"+ + "node-ref]/nt:termination-point/nt:tp-id"; + require-instance false; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nw:node-ref; + } + + augment "/nw:networks/nw:network" { + description + "Add links to the network data model."; + list link { + key "link-id"; + description + "A network link connects a local (source) node and + a remote (destination) node via a set of the respective + node's termination points. It is possible to have several + links between the same source and destination nodes. + Likewise, a link could potentially be re-homed between + termination points. Therefore, in order to ensure that we + would always know to distinguish between links, every link + is identified by a dedicated link identifier. Note that a + link models a point-to-point link, not a multipoint link."; + leaf link-id { + type link-id; + description + "The identifier of a link in the topology. + A link is specific to a topology to which it belongs."; + } + container source { + description + "This container holds the logical source of a particular + link."; + leaf source-node { + type leafref { + path "../../../nw:node/nw:node-id"; + require-instance false; + } + description + "Source node identifier. Must be in the same topology."; + } + leaf source-tp { + type leafref { + path "../../../nw:node[nw:node-id=current()/../"+ + "source-node]/termination-point/tp-id"; + require-instance false; + } + description + "This termination point is located within the source node + and terminates the link."; + } + } + container destination { + description + "This container holds the logical destination of a + particular link."; + leaf dest-node { + type leafref { + path "../../../nw:node/nw:node-id"; + require-instance false; + } + description + "Destination node identifier. 
Must be in the same + network."; + } + leaf dest-tp { + type leafref { + path "../../../nw:node[nw:node-id=current()/../"+ + "dest-node]/termination-point/tp-id"; + require-instance false; + } + description + "This termination point is located within the + destination node and terminates the link."; + } + } + list supporting-link { + key "network-ref link-ref"; + description + "Identifies the link or links on which this link depends."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-network/nw:network-ref"; + require-instance false; + } + description + "This leaf identifies in which underlay topology + the supporting link is present."; + } + leaf link-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/"+ + "../network-ref]/link/link-id"; + require-instance false; + } + description + "This leaf identifies a link that is a part + of this link's underlay. Reference loops in which + a link identifies itself as its underlay, either + directly or transitively, are not allowed."; + } + } + } + } + augment "/nw:networks/nw:network/nw:node" { + description + "Augments termination points that terminate links. + Termination points can ultimately be mapped to interfaces."; + list termination-point { + key "tp-id"; + description + "A termination point can terminate a link. + Depending on the type of topology, a termination point + could, for example, refer to a port or an interface."; + leaf tp-id { + type tp-id; + description + "Termination point identifier."; + } + list supporting-termination-point { + key "network-ref node-ref tp-ref"; + description + "This list identifies any termination points on which a + given termination point depends or onto which it maps. + Those termination points will themselves be contained + in a supporting node. This dependency information can be + inferred from the dependencies between links. Therefore, + this item is not separately configurable. Hence, no + corresponding constraint needs to be articulated. 
+ The corresponding information is simply provided by the + implementing system."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-node/nw:network-ref"; + require-instance false; + } + description + "This leaf identifies in which topology the + supporting termination point is present."; + } + leaf node-ref { + type leafref { + path "../../../nw:supporting-node/nw:node-ref"; + require-instance false; + } + description + "This leaf identifies in which node the supporting + termination point is present."; + } + leaf tp-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/"+ + "../network-ref]/nw:node[nw:node-id=current()/../"+ + "node-ref]/termination-point/tp-id"; + require-instance false; + } + description + "Reference to the underlay node (the underlay node must + be in a different topology)."; + } + } + } + } +} diff --git a/src/tests/tools/simap_server/simap_server/yang/ietf-network.yang b/src/tests/tools/simap_server/simap_server/yang/ietf-network.yang new file mode 100644 index 000000000..960973401 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/yang/ietf-network.yang @@ -0,0 +1,193 @@ +module ietf-network { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix nw; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Alexander Clemm + + + Editor: Jan Medved + + + Editor: Robert Varga + + + Editor: Nitin Bahadur + + + Editor: Hariharan Ananthakrishnan + + + Editor: Xufeng Liu + "; + + description + "This module defines a common base data model for a collection + of nodes in a network. Node definitions are further used + in network topologies and inventories. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8345; + see the RFC itself for full legal notices."; + + revision 2018-02-26 { + description + "Initial revision."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef node-id { + type inet:uri; + description + "Identifier for a node. The precise structure of the node-id + will be up to the implementation. For example, some + implementations MAY pick a URI that includes the network-id + as part of the path. The identifier SHOULD be chosen + such that the same node in a real network topology will + always be identified through the same identifier, even if + the data model is instantiated in separate datastores. An + implementation MAY choose to capture semantics in the + identifier -- for example, to indicate the type of node."; + } + + typedef network-id { + type inet:uri; + description + "Identifier for a network. The precise structure of the + network-id will be up to the implementation. The identifier + SHOULD be chosen such that the same network will always be + identified through the same identifier, even if the data model + is instantiated in separate datastores. 
An implementation MAY + choose to capture semantics in the identifier -- for example, + to indicate the type of network."; + } + + grouping network-ref { + description + "Contains the information necessary to reference a network -- + for example, an underlay network."; + leaf network-ref { + type leafref { + path "/nw:networks/nw:network/nw:network-id"; + require-instance false; + } + description + "Used to reference a network -- for example, an underlay + network."; + } + } + + grouping node-ref { + description + "Contains the information necessary to reference a node."; + leaf node-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nw:node/nw:node-id"; + require-instance false; + } + description + "Used to reference a node. + Nodes are identified relative to the network that + contains them."; + } + uses network-ref; + } + + container networks { + description + "Serves as a top-level container for a list of networks."; + list network { + key "network-id"; + description + "Describes a network. + A network typically contains an inventory of nodes, + topological information (augmented through the + network-topology data model), and layering information."; + leaf network-id { + type network-id; + description + "Identifies a network."; + } + container network-types { + description + "Serves as an augmentation target. + The network type is indicated through corresponding + presence containers augmented into this container."; + } + list supporting-network { + key "network-ref"; + description + "An underlay network, used to represent layered network + topologies."; + leaf network-ref { + type leafref { + path "/nw:networks/nw:network/nw:network-id"; + require-instance false; + } + description + "References the underlay network."; + } + } + + list node { + key "node-id"; + description + "The inventory of nodes of this network."; + leaf node-id { + type node-id; + description + "Uniquely identifies a node within the containing + network."; + } + list supporting-node { + key "network-ref node-ref"; + description + "Represents another node that is in an underlay network + and that supports this node. Used to represent layering + structure."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-network/nw:network-ref"; + require-instance false; + } + description + "References the underlay network of which the + underlay node is a part."; + } + leaf node-ref { + type leafref { + path "/nw:networks/nw:network/nw:node/nw:node-id"; + require-instance false; + } + description + "References the underlay node itself."; + } + } + } + } + } +} diff --git a/src/tests/tools/simap_server/simap_server/yang/simap-telemetry.yang b/src/tests/tools/simap_server/simap_server/yang/simap-telemetry.yang new file mode 100644 index 000000000..7ce09b13d --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/yang/simap-telemetry.yang @@ -0,0 +1,81 @@ +module simap-telemetry { + yang-version 1.1; + namespace "urn:simap:telemetry"; + prefix simap; + + import ietf-network { + prefix nw; + reference "RFC 8345"; + } + import ietf-network-topology { + prefix nt; + reference "RFC 8345"; + } + + organization + "SIMAP (example)"; + contact + "ops@simap.example"; + description + "Augments RFC 8345 network and topology objects with simple telemetry."; + + revision "2025-07-24" { + description "Initial revision."; + } + + /* --- Local typedefs --- */ + typedef percent { + type decimal64 { + fraction-digits 2; + range "0 .. 
100"; + } + units "percent"; + description "0–100 percent value."; + } + + typedef milliseconds { + type decimal64 { + fraction-digits 3; + } + units "milliseconds"; + description "Latency expressed in milliseconds."; + } + + /* --- Augment link --- */ + augment "/nw:networks/nw:network/nt:link" { + description + "Add telemetry to links."; + container simap-telemetry { + description "SIMAP link telemetry."; + leaf bandwidth-utilization { + type percent; + description "Current bandwidth utilization."; + } + leaf latency { + type milliseconds; + description "One-way latency over the link."; + } + leaf-list related-service-ids { + type string; + description "Service identifiers associated with this link."; + } + } + } + + /* --- Augment node --- */ + augment "/nw:networks/nw:network/nw:node" { + description + "Add telemetry to nodes."; + container simap-telemetry { + description "SIMAP node telemetry."; + leaf cpu-utilization { + type percent; + description "Node CPU utilization."; + } + leaf-list related-service-ids { + type string; + description "Service identifiers associated with this node."; + } + } + } +} diff --git a/src/tests/tools/simap_server/simap_server/yang/simap.txt b/src/tests/tools/simap_server/simap_server/yang/simap.txt new file mode 100644 index 000000000..ca35944de --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/yang/simap.txt @@ -0,0 +1,10 @@ +module: simap-telemetry-augmentation + augment /nw:networks/nw:network/nw:link: + +--rw simap-telemetry + +--rw bandwidth-utilization? decimal64 (percent) + +--rw latency? decimal64 (milliseconds) + +--rw related-service-ids* string + augment /nw:networks/nw:network/nw:node: + +--rw simap-telemetry + +--rw cpu-utilization? decimal64 (percent) + +--rw related-service-ids* string \ No newline at end of file diff --git a/src/tests/tools/simap_server/tests.sh b/src/tests/tools/simap_server/tests.sh new file mode 100755 index 000000000..de0eeef50 --- /dev/null +++ b/src/tests/tools/simap_server/tests.sh @@ -0,0 +1,66 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "Read data on startup..." +curl http://127.0.0.1:8080/restconf/data/ietf-network-topology + + +curl http://127.0.0.1:8080/restconf/data/simap-telemetry + +echo +echo + +echo "Initializing networks..." +curl -X POST -d '{"networks":{"network":[]}}' http://127.0.0.1:8080/restconf/data/simap-telemetry: +# or +curl -X POST -d '{"networks":{}}' http://127.0.0.1:8080/restconf/data/ietf-network-topology: +curl -X PUT -d '{"networks":{"network":[]}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks +echo +echo + +echo "Adding a network..." +curl -X POST -d '{"network":[{"network-id":"simap-1", "node": []}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network +echo + +echo "Adding a node..." 
+curl -X POST -d '{"node":[{"node-id":"r1", "termination-point":[]}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node +echo + +curl -X POST -d '{"termination-point":[{"tp-id":"201"}]}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network=simap-1/node=r1/termination-point + + +curl -X POST -d '{"ietf-network:network": {"network-id":"simap-1"}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network + + +curl -X POST -d '{"node":[{"node-id":"r2", "simap:simap-telemetry":{}}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node + +curl -X POST -d '{"simap-telemetry":{"cpu-utilization": 98.3}}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node=r1/simap-telemetry + + + +echo "Read data after update 1..." +curl http://127.0.0.1:8080/restconf/data/ietf-network-topology +echo +echo + +echo "Updating location (from path)..." +curl -X PATCH -d '{"qkdn_location_id":"new-loc-2"}' http://127.0.0.1:8080/restconf/data/ietf-network-topology:qkd_node +echo + +echo "Read final value..." +curl http://127.0.0.1:8080/restconf/data/ietf-network-topology +echo +echo diff --git a/src/tests/tools/simap_server/tests/Dispatch_old.py b/src/tests/tools/simap_server/tests/Dispatch_old.py new file mode 100644 index 000000000..80876e3b2 --- /dev/null +++ b/src/tests/tools/simap_server/tests/Dispatch_old.py @@ -0,0 +1,261 @@ +import json, libyang, logging +from typing import Any, Dict +from flask import Response, abort, jsonify, request +from flask_restful import Resource +from simap_server.YangHandler import YangHandler +from ..resources.HttpStatusCodesEnum import HttpStatusCodesEnum + +LOGGER = logging.getLogger(__name__) + +def get_key_path(key_str): + ''' Support composite keys separated by commas ''' + return tuple(key_str.split(',')) if ',' in key_str else key_str + +class RestConfDispatch(Resource): + def __init__(self, datastore : Any, yang_handler : YangHandler) -> None: + super().__init__() + self._datastore = datastore + self._yang_handler = yang_handler + + def get(self, subpath : str = '') -> Response: + data = self._yang_handler.get(subpath) + if data is None: abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') + response = jsonify(data) + response.status_code = HttpStatusCodesEnum.SUCCESS_OK + return response + +# LOGGER.info('[get] subpath={:s}'.format(str(subpath))) +# if len(subpath) == 0: abort(HttpStatusCodesEnum.SVR_ERR_NOT_IMPLEMENTED) +# +# subpath_parts = subpath.split('/') +# root_node_path = '/{:s}'.format(str(subpath_parts[0])) +# root_node : libyang.DContainer = self._yang_handler.get_data_path(root_node_path) +# LOGGER.info('[get] root_node={:s}'.format(str(root_node.print_mem('json')))) +# +# yang_obj = root_node.find_path(subpath) +# if yang_obj is None: +# LOGGER.exception('Not Found') +# abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') +# +# json_data = json.loads(str(yang_obj.print_mem('json'))) +# LOGGER.info('json_data={:s}'.format(str(json_data))) +# #yang_obj.unlink() +# #yang_obj.free() +# +# response = jsonify(json_data) +# response.status_code = HttpStatusCodesEnum.SUCCESS_OK +# return response + + +# #str_data = yang_if.print_mem('json') +# #json_data = json.loads(str_data) +# #json_data = json_data['openconfig-interfaces:interface'][0] +# #str_data = json.dumps(json_data, indent=4) +# #LOGGER.info('Resulting Request (after unlink): {:s}'.format(str_data)) +# +# module_paths = self._yang_handler.get_module_paths() +# +# +# 
LOGGER.info('parts={:s}'.format(str(parts))) +# +# # Traverse to the last element +# current : Dict[str, Any] = self._datastore +# for part in parts: +# element_type = module_paths.get(part) +# #if isinstance(element_type, ) +# current : Dict[str, Any] = current.setdefault(part, {}) +# +# LOGGER.info('datastore={:s}'.format(str(self._datastore))) +# +# leaf = parts[-2] +# leaf_or_key = parts[-1] # key for list or value for leaf-list +# key = get_key_path(leaf_or_key) +# +# if key is None: +# if leaf not in current: +# LOGGER.exception('Not Found') +# abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') +# item = current[leaf] +# else: +# item = current.get(leaf, {}).get(key) +# if item is None: +# LOGGER.exception('Not Found') +# abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') +# +# response = jsonify(item) +# response.status_code = HttpStatusCodesEnum.SUCCESS_OK +# return response + + def post(self, subpath : str) -> Response: + try: + payload = json.dumps(request.get_json(force=True)) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST) + + created = self._yang_handler.create(subpath, payload) + if not created: abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST) + response = jsonify({'status': 'created'}) + response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED + return response + + +# parts = subpath.strip('/').split('/') +# +# # Traverse to the last container or list +# current = self._datastore +# for part in parts[:-2]: +# current = current.setdefault(part, {}) +# +# leaf = parts[-2] +# leaf_or_key = parts[-1] # key for list or value for leaf-list +# key = get_key_path(leaf_or_key) +# if key is None: +# LOGGER.exception('Missing key') +# abort(HttpStatusCodesEnum.CLI_ERR_CONFLICT, description='Missing key') +# if key in current.get(leaf, {}): +# LOGGER.exception('Key already exists') +# abort(HttpStatusCodesEnum.CLI_ERR_CONFLICT, description='Key already exists') +# +# +# try: +# json_data = json.dumps({leaf: [payload]} if isinstance(key, tuple) else {leaf: payload}) +# self._yang_handler._yang_context.parse_data_mem(json_data, format='json', config=True, trusted=True) +# +# if key is None: +# if leaf not in current: +# current[leaf] = payload +# else: +# current.setdefault(leaf, {}) +# if key not in current[leaf]: +# current[leaf][key] = payload +# +# response = jsonify({'status': 'validated'}) +# response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED +# return response +# except Exception as e: +# LOGGER.exception('YANG validation failed') +# abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description=str(e)) + + def put(self, subpath : str) -> Response: + parts = subpath.strip('/').split('/') + + # Traverse to the last container or list + current = self._datastore + for part in parts[:-2]: + current = current.setdefault(part, {}) + + leaf = parts[-2] + leaf_or_key = parts[-1] # key for list or value for leaf-list + key = get_key_path(leaf_or_key) + + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description='Invalid JSON') + + try: + json_data = json.dumps({leaf: [payload]} if isinstance(key, tuple) else {leaf: payload}) + self._yang_handler._yang_context.parse_data_mem(json_data, format='json', config=True, trusted=True) + + if key is None: + current[leaf] = payload + else: + current.setdefault(leaf, {}) + current[leaf][key] = payload + + response = jsonify({'status': 'validated'}) + 
response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED + return response + except Exception as e: + LOGGER.exception('YANG validation failed') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description=str(e)) + + def patch(self, subpath : str) -> Response: + parts = subpath.strip('/').split('/') + + # Traverse to the last container or list + current = self._datastore + for part in parts[:-2]: + current = current.setdefault(part, {}) + + leaf = parts[-2] + leaf_or_key = parts[-1] # key for list or value for leaf-list + key = get_key_path(leaf_or_key) + + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description='Invalid JSON') + + try: + json_data = json.dumps({leaf: [payload]} if isinstance(key, tuple) else {leaf: payload}) + self._yang_handler._yang_context.parse_data_mem(json_data, format='json', config=True, trusted=True) + + if key is None: + if leaf not in current: + LOGGER.exception('Not Found') + abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') + current[leaf].update(payload) + else: + if key not in current.get(leaf, {}): + LOGGER.exception('Not Found') + abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') + current[leaf][key].update(payload) + + response = jsonify({'status': 'patched'}) + response.status_code = HttpStatusCodesEnum.SUCCESS_OK + return response + except Exception as e: + LOGGER.exception('YANG validation failed') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description=str(e)) + + def delete(self, subpath : str) -> Response: + parts = subpath.strip('/').split('/') + + # Traverse to the last container or list + current = self._datastore + for part in parts[:-2]: + current = current.setdefault(part, {}) + + leaf = parts[-2] + leaf_or_key = parts[-1] # key for list or value for leaf-list + key = None + leaf_list_item = None + + # Determine if it's a leaf-list or a list key + if leaf in current and isinstance(current[leaf], list): + # It's a leaf-list + leaf_list_item = leaf_or_key + else: + key = get_key_path(leaf_or_key) + + if leaf not in current: + LOGGER.exception('Not Found') + abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') + + if key is not None: + if key in current[leaf]: + del current[leaf][key] + response = jsonify({}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT + return response + else: + LOGGER.exception('Not Found') + abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') + elif leaf_list_item is not None: + try: + current[leaf].remove(leaf_list_item) + response = jsonify({}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT + return response + except ValueError: + LOGGER.exception('Not Found') + abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') + else: + del current[leaf] + response = jsonify({}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT + return response diff --git a/src/tests/tools/simap_server/tests/create_get_object.py b/src/tests/tools/simap_server/tests/create_get_object.py new file mode 100644 index 000000000..6cb0b735f --- /dev/null +++ b/src/tests/tools/simap_server/tests/create_get_object.py @@ -0,0 +1,59 @@ +import json, libyang, logging + +YANG_SEARCH_PATH = './yang' +YANG_MODULES = [ + 'ietf-inet-types', + 'simap-telemetry', + 'ietf-network-topology', + 'ietf-network', +] + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +def main() -> None: + 
yang_context = libyang.Context(YANG_SEARCH_PATH) + for yang_module_name in YANG_MODULES: + LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) + yang_module = yang_context.load_module(yang_module_name) + yang_module.feature_enable_all() + + startup = {} + datastore = yang_context.parse_data_mem(json.dumps(startup), fmt='json') + + path = '/ietf-network:networks' + result = datastore.find_path(path) + json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) + LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) + + path = '/ietf-network:networks' + payload = json.dumps({"networks":{"network":[]}}) + yang_context.create_data_path( + path, parent=datastore, value=json.dumps(payload), update=False + ) + + path = '/ietf-network:networks' + result = datastore.find_path(path) + json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) + LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) + + path = '/ietf-network:networks/network[network-id="simap-1"]' + payload = json.dumps({"network":[{"network-id":"simap-1"}]}) + yang_context.create_data_path( + path, parent=datastore, value=json.dumps(payload), update=False + ) + + path = '/ietf-network:networks' + result = datastore.find_path(path) + json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) + LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) + + path = '/ietf-network:networks/network[network-id="simap-1"]' + result = datastore.find_path(path) + json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) + LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) + + yang_context.destroy() + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/simap_server/tests/libyang_examples.py b/src/tests/tools/simap_server/tests/libyang_examples.py new file mode 100644 index 000000000..0a6eb9ccf --- /dev/null +++ b/src/tests/tools/simap_server/tests/libyang_examples.py @@ -0,0 +1,162 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, libyang, logging, os +from typing import Dict + +logging.basicConfig(level=logging.DEBUG) +LOGGER = logging.getLogger(__name__) + +YANG_BASE_PATH = '/home/tfs/tfs-ctrl/src/device/service/drivers/gnmi_openconfig/git/openconfig/public' +YANG_SEARCH_PATHS = ':'.join([ + os.path.join(YANG_BASE_PATH, 'release'), + os.path.join(YANG_BASE_PATH, 'third_party'), +]) + +YANG_MODULES = [ + 'iana-if-type', + 'openconfig-bgp-types', + 'openconfig-vlan-types', + + 'openconfig-interfaces', + 'openconfig-if-8021x', + 'openconfig-if-aggregate', + 'openconfig-if-ethernet-ext', + 'openconfig-if-ethernet', + 'openconfig-if-ip-ext', + 'openconfig-if-ip', + 'openconfig-if-poe', + 'openconfig-if-sdn-ext', + 'openconfig-if-tunnel', + + 'openconfig-vlan', + + 'openconfig-types', + 'openconfig-policy-types', + 'openconfig-mpls-types', + 'openconfig-network-instance-types', + 'openconfig-network-instance', + + 'openconfig-platform', + 'openconfig-platform-controller-card', + 'openconfig-platform-cpu', + 'openconfig-platform-ext', + 'openconfig-platform-fabric', + 'openconfig-platform-fan', + 'openconfig-platform-integrated-circuit', + 'openconfig-platform-linecard', + 'openconfig-platform-pipeline-counters', + 'openconfig-platform-port', + 'openconfig-platform-psu', + 'openconfig-platform-software', + 'openconfig-platform-transceiver', + 'openconfig-platform-types', +] + +class YangHandler: + def __init__(self) -> None: + self._yang_context = libyang.Context(YANG_SEARCH_PATHS) + self._loaded_modules = set() + for yang_module_name in YANG_MODULES: + LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) + self._yang_context.load_module(yang_module_name).feature_enable_all() + self._loaded_modules.add(yang_module_name) + self._data_path_instances = dict() + + def get_data_paths(self) -> Dict[str, libyang.DNode]: + return self._data_path_instances + + def get_data_path(self, path : str) -> libyang.DNode: + data_path_instance = self._data_path_instances.get(path) + if data_path_instance is None: + data_path_instance = self._yang_context.create_data_path(path) + self._data_path_instances[path] = data_path_instance + return data_path_instance + + def destroy(self) -> None: + self._yang_context.destroy() + +def main(): + yang_handler = YangHandler() + + LOGGER.info('YangHandler Data (before):') + for path, dnode in yang_handler.get_data_paths().items(): + LOGGER.info('|-> {:s}: {:s}'.format(str(path), json.dumps(dnode.print_dict()))) + + if_name = 'eth1' + sif_index = 0 + enabled = True + address_ip = '172.16.0.1' + address_ip2 = '192.168.0.1' + address_prefix = 24 + mtu = 1500 + + yang_ifs : libyang.DContainer = yang_handler.get_data_path('/openconfig-interfaces:interfaces') + yang_if_path = 'interface[name="{:s}"]'.format(if_name) + yang_if : libyang.DContainer = yang_ifs.create_path(yang_if_path) + yang_if.create_path('config/name', if_name) + yang_if.create_path('config/enabled', enabled) + yang_if.create_path('config/mtu', mtu ) + + yang_sifs : libyang.DContainer = yang_if.create_path('subinterfaces') + yang_sif_path = 'subinterface[index="{:d}"]'.format(sif_index) + yang_sif : libyang.DContainer = yang_sifs.create_path(yang_sif_path) + yang_sif.create_path('config/index', sif_index) + yang_sif.create_path('config/enabled', enabled ) + + yang_ipv4 : libyang.DContainer = yang_sif.create_path('openconfig-if-ip:ipv4') + yang_ipv4.create_path('config/enabled', enabled) + + yang_ipv4_addrs : libyang.DContainer = yang_ipv4.create_path('addresses') + yang_ipv4_addr_path = 
'address[ip="{:s}"]'.format(address_ip) + yang_ipv4_addr : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path) + yang_ipv4_addr.create_path('config/ip', address_ip ) + yang_ipv4_addr.create_path('config/prefix-length', address_prefix) + + yang_ipv4_addr_path2 = 'address[ip="{:s}"]'.format(address_ip2) + yang_ipv4_addr2 : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path2) + yang_ipv4_addr2.create_path('config/ip', address_ip2 ) + yang_ipv4_addr2.create_path('config/prefix-length', address_prefix) + + str_data = yang_if.print_mem('json') + json_data = json.loads(str_data) + json_data = json_data['openconfig-interfaces:interface'][0] + str_data = json.dumps(json_data, indent=4) + LOGGER.info('Resulting Request (before unlink): {:s}'.format(str_data)) + + yang_ipv4_addr2.unlink() + + root_node : libyang.DContainer = yang_handler.get_data_path('/openconfig-interfaces:interfaces') + LOGGER.info('root_node={:s}'.format(str(root_node.print_mem('json')))) + + for s in root_node.siblings(): + LOGGER.info('sibling: {:s}'.format(str(s))) + + PATH_TMPL = '/openconfig-interfaces:interfaces/interface[name="{:s}"]/subinterfaces/subinterface[index="{:d}"]' + yang_sif = root_node.find_path(PATH_TMPL.format(if_name, sif_index)) + if yang_sif is not None: + LOGGER.info('yang_sif={:s}'.format(str(yang_sif.print_mem('json')))) + yang_sif.unlink() + yang_sif.free() + + str_data = yang_if.print_mem('json') + json_data = json.loads(str_data) + json_data = json_data['openconfig-interfaces:interface'][0] + str_data = json.dumps(json_data, indent=4) + LOGGER.info('Resulting Request (after unlink): {:s}'.format(str_data)) + + yang_handler.destroy() + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/simap_server/tests/test_path_to_json.py b/src/tests/tools/simap_server/tests/test_path_to_json.py new file mode 100644 index 000000000..b0d44644c --- /dev/null +++ b/src/tests/tools/simap_server/tests/test_path_to_json.py @@ -0,0 +1,23 @@ +import re + +path = '/ietf-network:networks/network[network-id="simap1"]/node[node-id="n1"]' +payload = {'ietf-network:node': {'node-id': 'n1', 'ietf-network-topology:termination-point': [{'tp-id': '201'}]}} + +if not path.startswith('/'): + raise ValueError('Path must start with "/"') + +for elem in path.strip('/').split('/'): + match = re.match(r"(?P[^\[]+)(?P(\[[^\]]+\])*)", elem) + if not match: + raise ValueError(f'Invalid path segment: {elem}') + + tag = match.group('name') + predicates_raw = match.group('predicates') + + if + + predicates = dict() + for pred in re.findall(r"\[([^\]=]+)='([^']+)'\]", predicates_raw): + predicates[pred[0]] = pred[1] + + if len(predicates) > 0: diff --git a/src/tests/tools/simap_server/tests/tests.sh b/src/tests/tools/simap_server/tests/tests.sh new file mode 100755 index 000000000..5dbe1877a --- /dev/null +++ b/src/tests/tools/simap_server/tests/tests.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +curl http://127.0.0.1:8080/restconf/data/ietf-network:networks + +curl -X POST -d '{"networks":{"network":[]}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks +curl -X POST -d '{"network":[{"network-id":"simap-1"}]}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network +curl -X POST -d '{"network":[{"network-id":"simap-1"}]}' 'http://127.0.0.1:8080/restconf/data/ietf-network:networks/network\[network-id="simap-1"\]' +curl -X POST -d '{"node":[{"node-id":"r1"}]}' 'http://127.0.0.1:8080/restconf/data/ietf-network:networks/network\[network-id="simap-1"\]/node\[node-id="r1"\]' + + + +curl -X 
POST -d '{"termination-point":[{"tp-id":"201"}]}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network=simap-1/node=r1/termination-point +curl -X POST -d '{"ietf-network:network": {"network-id":"simap-1"}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network +curl -X POST -d '{"node":[{"node-id":"r2", "simap:simap-telemetry":{}}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node +curl -X POST -d '{"simap-telemetry":{"cpu-utilization": 98.3}}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node=r1/simap-telemetry diff --git a/src/tests/tools/simap_server/tests/walk_module.py b/src/tests/tools/simap_server/tests/walk_module.py new file mode 100644 index 000000000..d373b7394 --- /dev/null +++ b/src/tests/tools/simap_server/tests/walk_module.py @@ -0,0 +1,45 @@ +import libyang, logging +from typing import List + +YANG_SEARCH_PATH = './yang' +YANG_MODULES = [ + 'ietf-inet-types', + 'simap-telemetry', + 'ietf-network-topology', + 'ietf-network', +] + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +def walk_schema(node : libyang.SNode, path : str = ''): + paths = [] + current_path = f'{path}/{node.name()}' + paths.append(current_path) + for child in node.children(): + if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue + paths.extend(walk_schema(child, current_path)) + return paths + +def extract_schema_paths(yang_module : libyang.Module) -> List[str]: + schema_paths = list() + for node in yang_module.children(): + schema_paths.extend(walk_schema(node)) + return schema_paths + +def main() -> None: + restconf_paths = list() + + yang_context = libyang.Context(YANG_SEARCH_PATH) + for yang_module_name in YANG_MODULES: + LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) + yang_module = yang_context.load_module(yang_module_name) + yang_module.feature_enable_all() + restconf_paths.extend(extract_schema_paths(yang_module)) + + LOGGER.info(str(restconf_paths)) + + yang_context.destroy() + +if __name__ == '__main__': + main() -- GitLab From 496149e20b665bcc9042752ea1c09adc3cc0f3a2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 06:56:35 +0000 Subject: [PATCH 014/367] Tests - Tools - SIMAP Server/Client - Implemented GET XPath functionality in YangHandler - Beautified SIMAP Client output --- src/tests/tools/simap_server/simap_client/__main__.py | 4 ++-- .../tools/simap_server/simap_server/YangHandler.py | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py index 8c7ecd67d..3aecad42e 100644 --- a/src/tests/tools/simap_server/simap_client/__main__.py +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -13,7 +13,7 @@ # limitations under the License. 
-import logging +import json, logging from .RestConfClient import RestConfClient from .SimapClient import SimapClient @@ -122,7 +122,7 @@ def main() -> None: simap_e2e.link('E2E-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L1'), ('simap-aggnet', 'AggNet-L1')]) - print('networks=', simap_client.networks()) + print('networks=', json.dumps(simap_client.networks())) if __name__ == '__main__': main() diff --git a/src/tests/tools/simap_server/simap_server/YangHandler.py b/src/tests/tools/simap_server/simap_server/YangHandler.py index 8a30b696d..8745c0b5e 100644 --- a/src/tests/tools/simap_server/simap_server/YangHandler.py +++ b/src/tests/tools/simap_server/simap_server/YangHandler.py @@ -68,6 +68,17 @@ class YangHandler: ) return json_data + def get_xpath(self, xpath : str) -> List[str]: + if not path.startswith('/'): path = '/' + path + nodes = self._datastore.find_all(xpath) + result = list() + for node in nodes: + result.append(node.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + )) + return result + def create(self, path : str, payload : Dict) -> str: if not path.startswith('/'): path = '/' + path # TODO: client should not provide identifier of element to be created, add it to subpath -- GitLab From 55c6d4ea78b73c1ef91522660cf578344d901fa1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 07:24:58 +0000 Subject: [PATCH 015/367] Tests - Tools - SIMAP Server/Client - Code cleanup --- .../tools/simap_server/simap-created.json | 203 -------------- .../tools/simap_server/tests/Dispatch_old.py | 261 ------------------ .../simap_server/tests/create_get_object.py | 59 ---- .../simap_server/tests/libyang_examples.py | 162 ----------- .../simap_server/tests/test_path_to_json.py | 23 -- src/tests/tools/simap_server/tests/tests.sh | 15 - .../tools/simap_server/tests/walk_module.py | 45 --- 7 files changed, 768 deletions(-) delete mode 100644 src/tests/tools/simap_server/simap-created.json delete mode 100644 src/tests/tools/simap_server/tests/Dispatch_old.py delete mode 100644 src/tests/tools/simap_server/tests/create_get_object.py delete mode 100644 src/tests/tools/simap_server/tests/libyang_examples.py delete mode 100644 src/tests/tools/simap_server/tests/test_path_to_json.py delete mode 100755 src/tests/tools/simap_server/tests/tests.sh delete mode 100644 src/tests/tools/simap_server/tests/walk_module.py diff --git a/src/tests/tools/simap_server/simap-created.json b/src/tests/tools/simap_server/simap-created.json deleted file mode 100644 index 04d08c570..000000000 --- a/src/tests/tools/simap_server/simap-created.json +++ /dev/null @@ -1,203 +0,0 @@ -{ - "ietf-network:networks": { - "network": [ - { - "network-id": "te", - "node": [ - {"node-id": "ONT1", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}]}, - {"node-id": "ONT2", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}]}, - {"node-id": "OLT", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "201"}, {"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "PE1", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "P1", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "P2", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "PE2", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": 
"500"}, {"tp-id": "501"}]}, - {"node-id": "OA", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "OTN1", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "OTN2", "ietf-network-topology:termination-point": [{"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "OE", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "500"}, {"tp-id": "501"}]}, - {"node-id": "POP1", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "201"}, {"tp-id": "500"}]}, - {"node-id": "POP2", "ietf-network-topology:termination-point": [{"tp-id": "200"}, {"tp-id": "201"}, {"tp-id": "500"}]} - ], - "ietf-network-topology:link": [ - {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "200"}}, - {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "201"}}, - {"link-id": "L5", "source": {"source-node": "PE1", "source-tp": "500"}, "destination": {"dest-node": "P1", "dest-tp": "500"}}, - {"link-id": "L6", "source": {"source-node": "PE1", "source-tp": "501"}, "destination": {"dest-node": "P2", "dest-tp": "500"}}, - {"link-id": "L9", "source": {"source-node": "P1", "source-tp": "501"}, "destination": {"dest-node": "PE2", "dest-tp": "500"}}, - {"link-id": "L10", "source": {"source-node": "P2", "source-tp": "501"}, "destination": {"dest-node": "PE2", "dest-tp": "501"}}, - {"link-id": "L7", "source": {"source-node": "OA", "source-tp": "500"}, "destination": {"dest-node": "OTN1", "dest-tp": "500"}}, - {"link-id": "L8", "source": {"source-node": "OA", "source-tp": "501"}, "destination": {"dest-node": "OTN2", "dest-tp": "500"}}, - {"link-id": "L11", "source": {"source-node": "OTN1", "source-tp": "501"}, "destination": {"dest-node": "OE", "dest-tp": "500"}}, - {"link-id": "L12", "source": {"source-node": "OTN2", "source-tp": "501"}, "destination": {"dest-node": "OE", "dest-tp": "501"}}, - {"link-id": "L3", "source": {"source-node": "OLT", "source-tp": "500"}, "destination": {"dest-node": "PE1", "dest-tp": "200"}}, - {"link-id": "L4", "source": {"source-node": "OLT", "source-tp": "501"}, "destination": {"dest-node": "OA", "dest-tp": "200"}}, - {"link-id": "L13", "source": {"source-node": "PE2", "source-tp": "200"}, "destination": {"dest-node": "POP1", "dest-tp": "500"}}, - {"link-id": "L14", "source": {"source-node": "OE", "source-tp": "200"}, "destination": {"dest-node": "POP2", "dest-tp": "500"}} - ] - }, - { - "network-id": "simap-trans", - "supporting-network": [{"network-ref": "te"}], - "node": [ - {"node-id": "site1", "supporting-node": [{"network-ref": "te", "node-ref": "PE1"}], "ietf-network-topology:termination-point": [ - {"tp-id": "200", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE1", "tp-ref": "200"}]}, - {"tp-id": "500", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE1", "tp-ref": "500"}]}, - {"tp-id": "501", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE1", "tp-ref": "501"}]} - ]}, - {"node-id": "site2", "supporting-node": [{"network-ref": "te", "node-ref": "PE2"}], "ietf-network-topology:termination-point": [ - {"tp-id": "200", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE2", "tp-ref": "200"}]}, - {"tp-id": "500", "supporting-termination-point": [{"network-ref": "te", "node-ref": "PE2", "tp-ref": "500"}]}, - {"tp-id": "501", 
"supporting-termination-point": [{"network-ref": "te", "node-ref": "PE2", "tp-ref": "501"}]} - ]} - ], - "ietf-network-topology:link": [ - {"link-id": "Trans-L1", - "source": {"source-node": "site1", "source-tp": "500"}, - "destination": {"dest-node": "site2", "dest-tp": "500"}, - "supporting-link": [ - {"network-ref": "te", "link-ref": "L5"}, - {"network-ref": "te", "link-ref": "L9"} - ] - } - ] - }, - { - "network-id": "simap-aggnet", - "supporting-network": [ - { - "network-ref": "te" - }, - { - "network-ref": "simap-trans" - } - ], - "node": [ - {"node-id": "sdp1", "supporting-node": [ - { - "network-ref": "te", - "node-ref": "OLT" - } - ], - "ietf-network-topology:termination-point": [ - {"tp-id": "200", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "OLT", "tp-ref": "200"}]}, - {"tp-id": "201", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "OLT", "tp-ref": "201"}]}, - {"tp-id": "500", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "OLT", "tp-ref": "500"}]}, - {"tp-id": "501", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "OLT", "tp-ref": "501"}]} - ] - }, - {"node-id": "sdp2", "supporting-node": [ - { - "network-ref": "te", - "node-ref": "POP1" - } - ], - "ietf-network-topology:termination-point": [ - {"tp-id": "200", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "POP1", "tp-ref": "200"}]}, - {"tp-id": "201", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "POP1", "tp-ref": "201"}]}, - {"tp-id": "500", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "POP1", "tp-ref": "500"}]} - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "AggNet-L1", - "source": { - "source-node": "sdp1", - "source-tp": "500" - }, - "destination": { - "dest-node": "sdp2", - "dest-tp": "500" - }, - "supporting-link": [ - { - "network-ref": "te", - "link-ref": "L3" - }, - { - "network-ref": "simap-trans", - "link-ref": "Trans-L1" - }, - { - "network-ref": "te", - "link-ref": "L13" - } - ] - } - ] - }, - { - "network-id": "simap-e2e", - "supporting-network": [ - { - "network-ref": "te" - }, - { - "network-ref": "simap-trans" - } - ], - "node": [ - {"node-id": "sdp1", "supporting-node": [ - { - "network-ref": "te", - "node-ref": "ONT1" - } - ], - "ietf-network-topology:termination-point": [ - {"tp-id": "200", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "ONT1", "tp-ref": "200"}]}, - {"tp-id": "500", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "ONT1", "tp-ref": "500"}]} - ] - }, - {"node-id": "sdp2", "supporting-node": [ - { - "network-ref": "te", - "node-ref": "POP1" - } - ], - "ietf-network-topology:termination-point": [ - {"tp-id": "200", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "POP1", "tp-ref": "200"}]}, - {"tp-id": "201", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "POP1", "tp-ref": "201"}]}, - {"tp-id": "500", "supporting-termination-point": [ - {"network-ref": "te", "node-ref": "POP1", "tp-ref": "500"}]} - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "E2E-L1", - "source": { - "source-node": "sdp1", - "source-tp": "500" - }, - "destination": { - "dest-node": "sdp2", - "dest-tp": "500" - }, - "supporting-link": [ - { - "network-ref": "te", - "link-ref": "L1" - }, - { - "network-ref": "simap-aggnet", - "link-ref": "AggNet-L1" - } - ] - } - ] - } - ] - }, - "ietf-yang-schema-mount:schema-mounts": {} -} diff --git 
a/src/tests/tools/simap_server/tests/Dispatch_old.py b/src/tests/tools/simap_server/tests/Dispatch_old.py deleted file mode 100644 index 80876e3b2..000000000 --- a/src/tests/tools/simap_server/tests/Dispatch_old.py +++ /dev/null @@ -1,261 +0,0 @@ -import json, libyang, logging -from typing import Any, Dict -from flask import Response, abort, jsonify, request -from flask_restful import Resource -from simap_server.YangHandler import YangHandler -from ..resources.HttpStatusCodesEnum import HttpStatusCodesEnum - -LOGGER = logging.getLogger(__name__) - -def get_key_path(key_str): - ''' Support composite keys separated by commas ''' - return tuple(key_str.split(',')) if ',' in key_str else key_str - -class RestConfDispatch(Resource): - def __init__(self, datastore : Any, yang_handler : YangHandler) -> None: - super().__init__() - self._datastore = datastore - self._yang_handler = yang_handler - - def get(self, subpath : str = '') -> Response: - data = self._yang_handler.get(subpath) - if data is None: abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') - response = jsonify(data) - response.status_code = HttpStatusCodesEnum.SUCCESS_OK - return response - -# LOGGER.info('[get] subpath={:s}'.format(str(subpath))) -# if len(subpath) == 0: abort(HttpStatusCodesEnum.SVR_ERR_NOT_IMPLEMENTED) -# -# subpath_parts = subpath.split('/') -# root_node_path = '/{:s}'.format(str(subpath_parts[0])) -# root_node : libyang.DContainer = self._yang_handler.get_data_path(root_node_path) -# LOGGER.info('[get] root_node={:s}'.format(str(root_node.print_mem('json')))) -# -# yang_obj = root_node.find_path(subpath) -# if yang_obj is None: -# LOGGER.exception('Not Found') -# abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') -# -# json_data = json.loads(str(yang_obj.print_mem('json'))) -# LOGGER.info('json_data={:s}'.format(str(json_data))) -# #yang_obj.unlink() -# #yang_obj.free() -# -# response = jsonify(json_data) -# response.status_code = HttpStatusCodesEnum.SUCCESS_OK -# return response - - -# #str_data = yang_if.print_mem('json') -# #json_data = json.loads(str_data) -# #json_data = json_data['openconfig-interfaces:interface'][0] -# #str_data = json.dumps(json_data, indent=4) -# #LOGGER.info('Resulting Request (after unlink): {:s}'.format(str_data)) -# -# module_paths = self._yang_handler.get_module_paths() -# -# -# LOGGER.info('parts={:s}'.format(str(parts))) -# -# # Traverse to the last element -# current : Dict[str, Any] = self._datastore -# for part in parts: -# element_type = module_paths.get(part) -# #if isinstance(element_type, ) -# current : Dict[str, Any] = current.setdefault(part, {}) -# -# LOGGER.info('datastore={:s}'.format(str(self._datastore))) -# -# leaf = parts[-2] -# leaf_or_key = parts[-1] # key for list or value for leaf-list -# key = get_key_path(leaf_or_key) -# -# if key is None: -# if leaf not in current: -# LOGGER.exception('Not Found') -# abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') -# item = current[leaf] -# else: -# item = current.get(leaf, {}).get(key) -# if item is None: -# LOGGER.exception('Not Found') -# abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') -# -# response = jsonify(item) -# response.status_code = HttpStatusCodesEnum.SUCCESS_OK -# return response - - def post(self, subpath : str) -> Response: - try: - payload = json.dumps(request.get_json(force=True)) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST) - - created = 
self._yang_handler.create(subpath, payload) - if not created: abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST) - response = jsonify({'status': 'created'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED - return response - - -# parts = subpath.strip('/').split('/') -# -# # Traverse to the last container or list -# current = self._datastore -# for part in parts[:-2]: -# current = current.setdefault(part, {}) -# -# leaf = parts[-2] -# leaf_or_key = parts[-1] # key for list or value for leaf-list -# key = get_key_path(leaf_or_key) -# if key is None: -# LOGGER.exception('Missing key') -# abort(HttpStatusCodesEnum.CLI_ERR_CONFLICT, description='Missing key') -# if key in current.get(leaf, {}): -# LOGGER.exception('Key already exists') -# abort(HttpStatusCodesEnum.CLI_ERR_CONFLICT, description='Key already exists') -# -# -# try: -# json_data = json.dumps({leaf: [payload]} if isinstance(key, tuple) else {leaf: payload}) -# self._yang_handler._yang_context.parse_data_mem(json_data, format='json', config=True, trusted=True) -# -# if key is None: -# if leaf not in current: -# current[leaf] = payload -# else: -# current.setdefault(leaf, {}) -# if key not in current[leaf]: -# current[leaf][key] = payload -# -# response = jsonify({'status': 'validated'}) -# response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED -# return response -# except Exception as e: -# LOGGER.exception('YANG validation failed') -# abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description=str(e)) - - def put(self, subpath : str) -> Response: - parts = subpath.strip('/').split('/') - - # Traverse to the last container or list - current = self._datastore - for part in parts[:-2]: - current = current.setdefault(part, {}) - - leaf = parts[-2] - leaf_or_key = parts[-1] # key for list or value for leaf-list - key = get_key_path(leaf_or_key) - - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description='Invalid JSON') - - try: - json_data = json.dumps({leaf: [payload]} if isinstance(key, tuple) else {leaf: payload}) - self._yang_handler._yang_context.parse_data_mem(json_data, format='json', config=True, trusted=True) - - if key is None: - current[leaf] = payload - else: - current.setdefault(leaf, {}) - current[leaf][key] = payload - - response = jsonify({'status': 'validated'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED - return response - except Exception as e: - LOGGER.exception('YANG validation failed') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description=str(e)) - - def patch(self, subpath : str) -> Response: - parts = subpath.strip('/').split('/') - - # Traverse to the last container or list - current = self._datastore - for part in parts[:-2]: - current = current.setdefault(part, {}) - - leaf = parts[-2] - leaf_or_key = parts[-1] # key for list or value for leaf-list - key = get_key_path(leaf_or_key) - - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description='Invalid JSON') - - try: - json_data = json.dumps({leaf: [payload]} if isinstance(key, tuple) else {leaf: payload}) - self._yang_handler._yang_context.parse_data_mem(json_data, format='json', config=True, trusted=True) - - if key is None: - if leaf not in current: - LOGGER.exception('Not Found') - abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') - current[leaf].update(payload) - else: - if key not in 
current.get(leaf, {}): - LOGGER.exception('Not Found') - abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') - current[leaf][key].update(payload) - - response = jsonify({'status': 'patched'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_OK - return response - except Exception as e: - LOGGER.exception('YANG validation failed') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST, description=str(e)) - - def delete(self, subpath : str) -> Response: - parts = subpath.strip('/').split('/') - - # Traverse to the last container or list - current = self._datastore - for part in parts[:-2]: - current = current.setdefault(part, {}) - - leaf = parts[-2] - leaf_or_key = parts[-1] # key for list or value for leaf-list - key = None - leaf_list_item = None - - # Determine if it's a leaf-list or a list key - if leaf in current and isinstance(current[leaf], list): - # It's a leaf-list - leaf_list_item = leaf_or_key - else: - key = get_key_path(leaf_or_key) - - if leaf not in current: - LOGGER.exception('Not Found') - abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') - - if key is not None: - if key in current[leaf]: - del current[leaf][key] - response = jsonify({}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT - return response - else: - LOGGER.exception('Not Found') - abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') - elif leaf_list_item is not None: - try: - current[leaf].remove(leaf_list_item) - response = jsonify({}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT - return response - except ValueError: - LOGGER.exception('Not Found') - abort(HttpStatusCodesEnum.CLI_ERR_NOT_FOUND, description='Not Found') - else: - del current[leaf] - response = jsonify({}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT - return response diff --git a/src/tests/tools/simap_server/tests/create_get_object.py b/src/tests/tools/simap_server/tests/create_get_object.py deleted file mode 100644 index 6cb0b735f..000000000 --- a/src/tests/tools/simap_server/tests/create_get_object.py +++ /dev/null @@ -1,59 +0,0 @@ -import json, libyang, logging - -YANG_SEARCH_PATH = './yang' -YANG_MODULES = [ - 'ietf-inet-types', - 'simap-telemetry', - 'ietf-network-topology', - 'ietf-network', -] - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - -def main() -> None: - yang_context = libyang.Context(YANG_SEARCH_PATH) - for yang_module_name in YANG_MODULES: - LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) - yang_module = yang_context.load_module(yang_module_name) - yang_module.feature_enable_all() - - startup = {} - datastore = yang_context.parse_data_mem(json.dumps(startup), fmt='json') - - path = '/ietf-network:networks' - result = datastore.find_path(path) - json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) - LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) - - path = '/ietf-network:networks' - payload = json.dumps({"networks":{"network":[]}}) - yang_context.create_data_path( - path, parent=datastore, value=json.dumps(payload), update=False - ) - - path = '/ietf-network:networks' - result = datastore.find_path(path) - json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) - LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) - - path = '/ietf-network:networks/network[network-id="simap-1"]' - payload = 
json.dumps({"network":[{"network-id":"simap-1"}]}) - yang_context.create_data_path( - path, parent=datastore, value=json.dumps(payload), update=False - ) - - path = '/ietf-network:networks' - result = datastore.find_path(path) - json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) - LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) - - path = '/ietf-network:networks/network[network-id="simap-1"]' - result = datastore.find_path(path) - json_result = result.print_mem(fmt='json', with_siblings=True, pretty=True, keep_empty_containers=True, include_implicit_defaults=True) - LOGGER.info('{:s} => {:s}'.format(path, str(json_result))) - - yang_context.destroy() - -if __name__ == '__main__': - main() diff --git a/src/tests/tools/simap_server/tests/libyang_examples.py b/src/tests/tools/simap_server/tests/libyang_examples.py deleted file mode 100644 index 0a6eb9ccf..000000000 --- a/src/tests/tools/simap_server/tests/libyang_examples.py +++ /dev/null @@ -1,162 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json, libyang, logging, os -from typing import Dict - -logging.basicConfig(level=logging.DEBUG) -LOGGER = logging.getLogger(__name__) - -YANG_BASE_PATH = '/home/tfs/tfs-ctrl/src/device/service/drivers/gnmi_openconfig/git/openconfig/public' -YANG_SEARCH_PATHS = ':'.join([ - os.path.join(YANG_BASE_PATH, 'release'), - os.path.join(YANG_BASE_PATH, 'third_party'), -]) - -YANG_MODULES = [ - 'iana-if-type', - 'openconfig-bgp-types', - 'openconfig-vlan-types', - - 'openconfig-interfaces', - 'openconfig-if-8021x', - 'openconfig-if-aggregate', - 'openconfig-if-ethernet-ext', - 'openconfig-if-ethernet', - 'openconfig-if-ip-ext', - 'openconfig-if-ip', - 'openconfig-if-poe', - 'openconfig-if-sdn-ext', - 'openconfig-if-tunnel', - - 'openconfig-vlan', - - 'openconfig-types', - 'openconfig-policy-types', - 'openconfig-mpls-types', - 'openconfig-network-instance-types', - 'openconfig-network-instance', - - 'openconfig-platform', - 'openconfig-platform-controller-card', - 'openconfig-platform-cpu', - 'openconfig-platform-ext', - 'openconfig-platform-fabric', - 'openconfig-platform-fan', - 'openconfig-platform-integrated-circuit', - 'openconfig-platform-linecard', - 'openconfig-platform-pipeline-counters', - 'openconfig-platform-port', - 'openconfig-platform-psu', - 'openconfig-platform-software', - 'openconfig-platform-transceiver', - 'openconfig-platform-types', -] - -class YangHandler: - def __init__(self) -> None: - self._yang_context = libyang.Context(YANG_SEARCH_PATHS) - self._loaded_modules = set() - for yang_module_name in YANG_MODULES: - LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) - self._yang_context.load_module(yang_module_name).feature_enable_all() - self._loaded_modules.add(yang_module_name) - self._data_path_instances = dict() - - def get_data_paths(self) -> Dict[str, libyang.DNode]: - return 
self._data_path_instances - - def get_data_path(self, path : str) -> libyang.DNode: - data_path_instance = self._data_path_instances.get(path) - if data_path_instance is None: - data_path_instance = self._yang_context.create_data_path(path) - self._data_path_instances[path] = data_path_instance - return data_path_instance - - def destroy(self) -> None: - self._yang_context.destroy() - -def main(): - yang_handler = YangHandler() - - LOGGER.info('YangHandler Data (before):') - for path, dnode in yang_handler.get_data_paths().items(): - LOGGER.info('|-> {:s}: {:s}'.format(str(path), json.dumps(dnode.print_dict()))) - - if_name = 'eth1' - sif_index = 0 - enabled = True - address_ip = '172.16.0.1' - address_ip2 = '192.168.0.1' - address_prefix = 24 - mtu = 1500 - - yang_ifs : libyang.DContainer = yang_handler.get_data_path('/openconfig-interfaces:interfaces') - yang_if_path = 'interface[name="{:s}"]'.format(if_name) - yang_if : libyang.DContainer = yang_ifs.create_path(yang_if_path) - yang_if.create_path('config/name', if_name) - yang_if.create_path('config/enabled', enabled) - yang_if.create_path('config/mtu', mtu ) - - yang_sifs : libyang.DContainer = yang_if.create_path('subinterfaces') - yang_sif_path = 'subinterface[index="{:d}"]'.format(sif_index) - yang_sif : libyang.DContainer = yang_sifs.create_path(yang_sif_path) - yang_sif.create_path('config/index', sif_index) - yang_sif.create_path('config/enabled', enabled ) - - yang_ipv4 : libyang.DContainer = yang_sif.create_path('openconfig-if-ip:ipv4') - yang_ipv4.create_path('config/enabled', enabled) - - yang_ipv4_addrs : libyang.DContainer = yang_ipv4.create_path('addresses') - yang_ipv4_addr_path = 'address[ip="{:s}"]'.format(address_ip) - yang_ipv4_addr : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path) - yang_ipv4_addr.create_path('config/ip', address_ip ) - yang_ipv4_addr.create_path('config/prefix-length', address_prefix) - - yang_ipv4_addr_path2 = 'address[ip="{:s}"]'.format(address_ip2) - yang_ipv4_addr2 : libyang.DContainer = yang_ipv4_addrs.create_path(yang_ipv4_addr_path2) - yang_ipv4_addr2.create_path('config/ip', address_ip2 ) - yang_ipv4_addr2.create_path('config/prefix-length', address_prefix) - - str_data = yang_if.print_mem('json') - json_data = json.loads(str_data) - json_data = json_data['openconfig-interfaces:interface'][0] - str_data = json.dumps(json_data, indent=4) - LOGGER.info('Resulting Request (before unlink): {:s}'.format(str_data)) - - yang_ipv4_addr2.unlink() - - root_node : libyang.DContainer = yang_handler.get_data_path('/openconfig-interfaces:interfaces') - LOGGER.info('root_node={:s}'.format(str(root_node.print_mem('json')))) - - for s in root_node.siblings(): - LOGGER.info('sibling: {:s}'.format(str(s))) - - PATH_TMPL = '/openconfig-interfaces:interfaces/interface[name="{:s}"]/subinterfaces/subinterface[index="{:d}"]' - yang_sif = root_node.find_path(PATH_TMPL.format(if_name, sif_index)) - if yang_sif is not None: - LOGGER.info('yang_sif={:s}'.format(str(yang_sif.print_mem('json')))) - yang_sif.unlink() - yang_sif.free() - - str_data = yang_if.print_mem('json') - json_data = json.loads(str_data) - json_data = json_data['openconfig-interfaces:interface'][0] - str_data = json.dumps(json_data, indent=4) - LOGGER.info('Resulting Request (after unlink): {:s}'.format(str_data)) - - yang_handler.destroy() - -if __name__ == '__main__': - main() diff --git a/src/tests/tools/simap_server/tests/test_path_to_json.py b/src/tests/tools/simap_server/tests/test_path_to_json.py deleted file mode 100644 
index b0d44644c..000000000 --- a/src/tests/tools/simap_server/tests/test_path_to_json.py +++ /dev/null @@ -1,23 +0,0 @@ -import re - -path = '/ietf-network:networks/network[network-id="simap1"]/node[node-id="n1"]' -payload = {'ietf-network:node': {'node-id': 'n1', 'ietf-network-topology:termination-point': [{'tp-id': '201'}]}} - -if not path.startswith('/'): - raise ValueError('Path must start with "/"') - -for elem in path.strip('/').split('/'): - match = re.match(r"(?P[^\[]+)(?P(\[[^\]]+\])*)", elem) - if not match: - raise ValueError(f'Invalid path segment: {elem}') - - tag = match.group('name') - predicates_raw = match.group('predicates') - - if - - predicates = dict() - for pred in re.findall(r"\[([^\]=]+)='([^']+)'\]", predicates_raw): - predicates[pred[0]] = pred[1] - - if len(predicates) > 0: diff --git a/src/tests/tools/simap_server/tests/tests.sh b/src/tests/tools/simap_server/tests/tests.sh deleted file mode 100755 index 5dbe1877a..000000000 --- a/src/tests/tools/simap_server/tests/tests.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -curl http://127.0.0.1:8080/restconf/data/ietf-network:networks - -curl -X POST -d '{"networks":{"network":[]}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks -curl -X POST -d '{"network":[{"network-id":"simap-1"}]}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network -curl -X POST -d '{"network":[{"network-id":"simap-1"}]}' 'http://127.0.0.1:8080/restconf/data/ietf-network:networks/network\[network-id="simap-1"\]' -curl -X POST -d '{"node":[{"node-id":"r1"}]}' 'http://127.0.0.1:8080/restconf/data/ietf-network:networks/network\[network-id="simap-1"\]/node\[node-id="r1"\]' - - - -curl -X POST -d '{"termination-point":[{"tp-id":"201"}]}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network=simap-1/node=r1/termination-point -curl -X POST -d '{"ietf-network:network": {"network-id":"simap-1"}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network -curl -X POST -d '{"node":[{"node-id":"r2", "simap:simap-telemetry":{}}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node -curl -X POST -d '{"simap-telemetry":{"cpu-utilization": 98.3}}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node=r1/simap-telemetry diff --git a/src/tests/tools/simap_server/tests/walk_module.py b/src/tests/tools/simap_server/tests/walk_module.py deleted file mode 100644 index d373b7394..000000000 --- a/src/tests/tools/simap_server/tests/walk_module.py +++ /dev/null @@ -1,45 +0,0 @@ -import libyang, logging -from typing import List - -YANG_SEARCH_PATH = './yang' -YANG_MODULES = [ - 'ietf-inet-types', - 'simap-telemetry', - 'ietf-network-topology', - 'ietf-network', -] - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - -def walk_schema(node : libyang.SNode, path : str = ''): - paths = [] - current_path = f'{path}/{node.name()}' - paths.append(current_path) - for child in node.children(): - if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue - paths.extend(walk_schema(child, current_path)) - return paths - -def extract_schema_paths(yang_module : libyang.Module) -> List[str]: - schema_paths = list() - for node in yang_module.children(): - schema_paths.extend(walk_schema(node)) - return schema_paths - -def main() -> None: - restconf_paths = list() - - yang_context = libyang.Context(YANG_SEARCH_PATH) - for yang_module_name in YANG_MODULES: - LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) - yang_module = 
yang_context.load_module(yang_module_name) - yang_module.feature_enable_all() - restconf_paths.extend(extract_schema_paths(yang_module)) - - LOGGER.info(str(restconf_paths)) - - yang_context.destroy() - -if __name__ == '__main__': - main() -- GitLab From 6b221a33049dbff2a2324288b56d6c2a1db1e777 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 14:31:05 +0000 Subject: [PATCH 016/367] Manifests: - Disable WebUI/Grafana --- manifests/webuiservice.yaml | 118 ++++++++++++++++++------------------ 1 file changed, 59 insertions(+), 59 deletions(-) diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 0a6213e99..3e1c13422 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: grafana-pvc -spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi ---- +#apiVersion: v1 +#kind: PersistentVolumeClaim +#metadata: +# name: grafana-pvc +#spec: +# accessModes: +# - ReadWriteOnce +# resources: +# requests: +# storage: 1Gi +#--- apiVersion: apps/v1 kind: Deployment metadata: @@ -72,51 +72,51 @@ spec: limits: cpu: 1000m memory: 1024Mi - - name: grafana - image: grafana/grafana:8.5.22 - imagePullPolicy: IfNotPresent - ports: - - containerPort: 3000 - name: http-grafana - protocol: TCP - env: - - name: GF_SERVER_ROOT_URL - value: "http://0.0.0.0:3000/grafana/" - - name: GF_SERVER_SERVE_FROM_SUB_PATH - value: "true" - readinessProbe: - failureThreshold: 60 - httpGet: - #path: /robots.txt - path: /login - port: 3000 - scheme: HTTP - initialDelaySeconds: 1 - periodSeconds: 1 - successThreshold: 1 - timeoutSeconds: 2 - livenessProbe: - failureThreshold: 60 - initialDelaySeconds: 1 - periodSeconds: 1 - successThreshold: 1 - tcpSocket: - port: 3000 - timeoutSeconds: 1 - resources: - requests: - cpu: 250m - memory: 512Mi - limits: - cpu: 500m - memory: 1024Mi - volumeMounts: - - mountPath: /var/lib/grafana - name: grafana-pv - volumes: - - name: grafana-pv - persistentVolumeClaim: - claimName: grafana-pvc + # - name: grafana + # image: grafana/grafana:8.5.22 + # imagePullPolicy: IfNotPresent + # ports: + # - containerPort: 3000 + # name: http-grafana + # protocol: TCP + # env: + # - name: GF_SERVER_ROOT_URL + # value: "http://0.0.0.0:3000/grafana/" + # - name: GF_SERVER_SERVE_FROM_SUB_PATH + # value: "true" + # readinessProbe: + # failureThreshold: 60 + # httpGet: + # #path: /robots.txt + # path: /login + # port: 3000 + # scheme: HTTP + # initialDelaySeconds: 1 + # periodSeconds: 1 + # successThreshold: 1 + # timeoutSeconds: 2 + # livenessProbe: + # failureThreshold: 60 + # initialDelaySeconds: 1 + # periodSeconds: 1 + # successThreshold: 1 + # tcpSocket: + # port: 3000 + # timeoutSeconds: 1 + # resources: + # requests: + # cpu: 250m + # memory: 512Mi + # limits: + # cpu: 500m + # memory: 1024Mi + # volumeMounts: + # - mountPath: /var/lib/grafana + # name: grafana-pv + #volumes: + # - name: grafana-pv + # persistentVolumeClaim: + # claimName: grafana-pvc --- apiVersion: v1 kind: Service @@ -132,9 +132,9 @@ spec: - name: webui port: 8004 targetPort: 8004 - - name: grafana - port: 3000 - targetPort: 3000 + #- name: grafana + # port: 3000 + # targetPort: 3000 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler -- GitLab From a7b078a7cceb1c49af061e7103777d4ca484b8f3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 14:31:33 +0000 
Subject: [PATCH 017/367] Code cleanup --- src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml | 1 - 1 file changed, 1 deletion(-) delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml diff --git a/src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml b/src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml deleted file mode 100644 index 47645515b..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/report_onboarding.xml +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file -- GitLab From 77d5ce5fe067ec2731b871542090fa59abe3b3de Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 14:44:01 +0000 Subject: [PATCH 018/367] Code cleanup --- manifests/cockroachdb/single-node.yaml | 2 +- manifests/kafka/single-node.yaml | 2 +- manifests/nats/cluster.yaml | 2 -- manifests/questdb/manifest.yaml | 2 +- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/manifests/cockroachdb/single-node.yaml b/manifests/cockroachdb/single-node.yaml index 49b12c7f2..ed297d77c 100644 --- a/manifests/cockroachdb/single-node.yaml +++ b/manifests/cockroachdb/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: cockroachdb - image: 10.254.6.194:5000/cockroachdb/cockroach:latest-v22.2 + image: cockroachdb/cockroach:latest-v22.2 imagePullPolicy: IfNotPresent args: - start-single-node diff --git a/manifests/kafka/single-node.yaml b/manifests/kafka/single-node.yaml index ee7a7f6d3..4c435c11b 100644 --- a/manifests/kafka/single-node.yaml +++ b/manifests/kafka/single-node.yaml @@ -60,7 +60,7 @@ spec: restartPolicy: Always containers: - name: kafka - image: 10.254.6.194:5000/bitnami/kafka:latest + image: bitnami/kafka:latest imagePullPolicy: IfNotPresent ports: - name: clients diff --git a/manifests/nats/cluster.yaml b/manifests/nats/cluster.yaml index d2951ba58..40ce28fd7 100644 --- a/manifests/nats/cluster.yaml +++ b/manifests/nats/cluster.yaml @@ -14,8 +14,6 @@ container: image: - registry: 10.254.6.194:5000 - repository: nats tags: 2.9-alpine env: # different from k8s units, suffix must be B, KiB, MiB, GiB, or TiB diff --git a/manifests/questdb/manifest.yaml b/manifests/questdb/manifest.yaml index 8a9e0d99b..268e53ff9 100644 --- a/manifests/questdb/manifest.yaml +++ b/manifests/questdb/manifest.yaml @@ -31,7 +31,7 @@ spec: restartPolicy: Always containers: - name: metricsdb - image: 10.254.6.194:5000/questdb/questdb:latest + image: questdb/questdb ports: - containerPort: 9000 - containerPort: 9009 -- GitLab From 991a2a0caf41b2b859a545da96de45fc15710c9f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 14:46:46 +0000 Subject: [PATCH 019/367] Code cleanup --- src/context/Dockerfile | 2 +- src/device/Dockerfile | 2 +- src/nbi/Dockerfile | 2 +- src/pathcomp/backend/Dockerfile | 4 ++-- src/pathcomp/frontend/Dockerfile | 2 +- src/service/Dockerfile | 2 +- src/webui/Dockerfile | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/context/Dockerfile b/src/context/Dockerfile index 92b208af9..a4bf84153 100644 --- a/src/context/Dockerfile +++ b/src/context/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM localhost:5000/python:3.9-slim +FROM python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/device/Dockerfile b/src/device/Dockerfile index 8dfc9a4ad..d85419128 100644 --- a/src/device/Dockerfile +++ b/src/device/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM localhost:5000/python:3.9-slim +FROM python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile index 3609034d3..63556432b 100644 --- a/src/nbi/Dockerfile +++ b/src/nbi/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM localhost:5000/python:3.9-slim +FROM python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/pathcomp/backend/Dockerfile b/src/pathcomp/backend/Dockerfile index 468b0d852..2c447397d 100644 --- a/src/pathcomp/backend/Dockerfile +++ b/src/pathcomp/backend/Dockerfile @@ -15,7 +15,7 @@ # Multi-stage Docker image build # Stage 1 -FROM localhost:5000/ubuntu:20.04 AS builder +FROM ubuntu:20.04 AS builder ARG DEBIAN_FRONTEND=noninteractive # Install build software @@ -53,7 +53,7 @@ ENTRYPOINT [ "./pathComp-cvr", "config/pathcomp.conf", "screen_only" ] # Stage 2 -FROM localhost:5000/ubuntu:20.04 AS release +FROM ubuntu:20.04 AS release ARG DEBIAN_FRONTEND=noninteractive # Install build software diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile index 2647b14e9..2f6d5a3bd 100644 --- a/src/pathcomp/frontend/Dockerfile +++ b/src/pathcomp/frontend/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM localhost:5000/python:3.9-slim +FROM python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/service/Dockerfile b/src/service/Dockerfile index 9b6254465..49efe9829 100644 --- a/src/service/Dockerfile +++ b/src/service/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM localhost:5000/python:3.9-slim +FROM python:3.9-slim # Install dependencies RUN apt-get --yes --quiet --quiet update && \ diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile index 1635b8fd0..167280d68 100644 --- a/src/webui/Dockerfile +++ b/src/webui/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-FROM localhost:5000/python:3.9-slim +FROM python:3.9-slim # Ref: https://pythonspeed.com/articles/activate-virtualenv-dockerfile/ -- GitLab From f42895425ab9f5a20a7ba9bedc5b51936041134d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 14:51:35 +0000 Subject: [PATCH 020/367] Added missing copyright headers --- src/nbi/service/database/Engine.py | 1 + src/nbi/service/database/base.py | 15 +++++++++++++++ .../service/sse_telemetry/create_subscription.py | 1 + .../sse_telemetry/database/Subscription.py | 1 + .../sse_telemetry/database/models/Subscription.py | 1 + src/nbi/service/sse_telemetry/database_tmp.py | 14 ++++++++++++++ .../service/sse_telemetry/delete_subscription.py | 1 + src/nbi/service/sse_telemetry/topology.py | 15 +++++++++++++++ 8 files changed, 49 insertions(+) diff --git a/src/nbi/service/database/Engine.py b/src/nbi/service/database/Engine.py index 57f4b4db5..dd6916aed 100644 --- a/src/nbi/service/database/Engine.py +++ b/src/nbi/service/database/Engine.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import logging, sqlalchemy, sqlalchemy_utils from typing import Optional from common.Settings import get_setting diff --git a/src/nbi/service/database/base.py b/src/nbi/service/database/base.py index b12f8f20c..3cacca994 100644 --- a/src/nbi/service/database/base.py +++ b/src/nbi/service/database/base.py @@ -1,3 +1,18 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import sqlalchemy from sqlalchemy.orm import declarative_base diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index b7f3b6505..8283bad9b 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import json, logging from random import choice from sys import warnoptions diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py index 79abe4871..f654dfec6 100644 --- a/src/nbi/service/sse_telemetry/database/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/Subscription.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine diff --git a/src/nbi/service/sse_telemetry/database/models/Subscription.py b/src/nbi/service/sse_telemetry/database/models/Subscription.py index 53fa57e81..9312a514a 100644 --- a/src/nbi/service/sse_telemetry/database/models/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/models/Subscription.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ import sqlalchemy from sqlalchemy import Column, Integer, String, JSON, Boolean from sqlalchemy.dialects.postgresql import UUID diff --git a/src/nbi/service/sse_telemetry/database_tmp.py b/src/nbi/service/sse_telemetry/database_tmp.py index ba6f7d8f6..fb8778b9b 100644 --- a/src/nbi/service/sse_telemetry/database_tmp.py +++ b/src/nbi/service/sse_telemetry/database_tmp.py @@ -1,2 +1,16 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + SERVICE_ID = 'simap1' diff --git a/src/nbi/service/sse_telemetry/delete_subscription.py b/src/nbi/service/sse_telemetry/delete_subscription.py index 29d8c205e..e7a030735 100644 --- a/src/nbi/service/sse_telemetry/delete_subscription.py +++ b/src/nbi/service/sse_telemetry/delete_subscription.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + import logging from typing import Optional from flask import jsonify, request diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py index eb60f30c1..d7af66aab 100644 --- a/src/nbi/service/sse_telemetry/topology.py +++ b/src/nbi/service/sse_telemetry/topology.py @@ -1,3 +1,18 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import json import logging import os -- GitLab From eef0f4e743dc6748435504e170754b256b0c102b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 14:57:12 +0000 Subject: [PATCH 021/367] ECOC F5GA Telemetry Demo: - Corrected deploy scripts --- src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh | 2 +- src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh | 2 +- src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh | 2 +- src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh index c691ce745..a9ce26d40 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh @@ -17,7 +17,7 @@ # ----- TeraFlowSDN ------------------------------------------------------------ # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. 
-export TFS_REGISTRY_IMAGES="http://10.254.6.194:5000/tfs/" +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. export TFS_COMPONENTS="context device pathcomp service slice nbi webui" diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh index c691ce745..a9ce26d40 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh @@ -17,7 +17,7 @@ # ----- TeraFlowSDN ------------------------------------------------------------ # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://10.254.6.194:5000/tfs/" +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. export TFS_COMPONENTS="context device pathcomp service slice nbi webui" diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh index c691ce745..a9ce26d40 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh @@ -17,7 +17,7 @@ # ----- TeraFlowSDN ------------------------------------------------------------ # Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://10.254.6.194:5000/tfs/" +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. export TFS_COMPONENTS="context device pathcomp service slice nbi webui" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh index d0dd29957..9e9416b78 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -# assuming the instances are named as e2e-sdn-ctrl, agg-sdn-ctrl, and ip-sdn-ctrl -CTRL_NAME=$(hostname | cut -d'-' -f1) +# assuming the instances are named as tfs-e2e-ctrl, tfs-agg-ctrl, and tfs-ip-ctrl +CTRL_NAME=$(hostname | cut -d'-' -f2) echo "Deploying: ${CTRL_NAME}" source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-${CTRL_NAME}.sh -- GitLab From bc128f8bbb5c60b7ae3f897f5a48934bd9607787 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 15:03:46 +0000 Subject: [PATCH 022/367] Tests - Tools - SIMAP Server - Added missing startup.json file - Added pre-load of data in startup.json --- src/tests/tools/simap_server/simap_server/__main__.py | 7 +++++-- src/tests/tools/simap_server/startup.json | 1 + 2 files changed, 6 insertions(+), 2 deletions(-) create mode 100644 src/tests/tools/simap_server/startup.json diff --git a/src/tests/tools/simap_server/simap_server/__main__.py b/src/tests/tools/simap_server/simap_server/__main__.py index b2fbe2628..59fbe17ba 100644 --- a/src/tests/tools/simap_server/simap_server/__main__.py +++ b/src/tests/tools/simap_server/simap_server/__main__.py @@ -13,7 +13,7 @@ # limitations under the License. 
-import logging +import json, logging from flask import Flask from flask_restful import Api from .Dispatch import RestConfDispatch @@ -32,7 +32,10 @@ YANG_MODULE_NAMES = [ 'ietf-network-topology', 'ietf-network', ] -YANG_STARTUP_DATA = {} + +STARTUP_FILE = './startup.json' +with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: + YANG_STARTUP_DATA = json.loads(fp.read()) logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) diff --git a/src/tests/tools/simap_server/startup.json b/src/tests/tools/simap_server/startup.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/src/tests/tools/simap_server/startup.json @@ -0,0 +1 @@ +{} -- GitLab From 58f99af4b1e806c26340d4c3322bf41a61a42237 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 15:05:17 +0000 Subject: [PATCH 023/367] ECOC F5GA Telemetry Demo: - Corrected deploy scripts --- src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh | 4 ++-- src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh | 4 ++-- src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh index a9ce26d40..0820e21b7 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh @@ -91,7 +91,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # Set the tag you want to use for your images. -export TFS_IMAGE_TAG="f5ga" +export TFS_IMAGE_TAG="dev" # Set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE="tfs" @@ -109,7 +109,7 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. -export TFS_SKIP_BUILD="YES" +export TFS_SKIP_BUILD="" # ----- CockroachDB ------------------------------------------------------------ diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh index a9ce26d40..0820e21b7 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh @@ -91,7 +91,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # Set the tag you want to use for your images. -export TFS_IMAGE_TAG="f5ga" +export TFS_IMAGE_TAG="dev" # Set the name of the Kubernetes namespace to deploy TFS to. export TFS_K8S_NAMESPACE="tfs" @@ -109,7 +109,7 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. -export TFS_SKIP_BUILD="YES" +export TFS_SKIP_BUILD="" # ----- CockroachDB ------------------------------------------------------------ diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh index a9ce26d40..0820e21b7 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh @@ -91,7 +91,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # Set the tag you want to use for your images. -export TFS_IMAGE_TAG="f5ga" +export TFS_IMAGE_TAG="dev" # Set the name of the Kubernetes namespace to deploy TFS to. 
export TFS_K8S_NAMESPACE="tfs" @@ -109,7 +109,7 @@ export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" export TFS_GRAFANA_PASSWORD="admin123+" # Disable skip-build flag to rebuild the Docker images. -export TFS_SKIP_BUILD="YES" +export TFS_SKIP_BUILD="" # ----- CockroachDB ------------------------------------------------------------ -- GitLab From af66ee9d0c9eab8f7b1564305b648b9743146772 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 15:11:14 +0000 Subject: [PATCH 024/367] Tests - Tools - SIMAP Server - Corrected Dockerfile --- src/tests/tools/simap_server/Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/tools/simap_server/Dockerfile b/src/tests/tools/simap_server/Dockerfile index 3ad6626af..f2c56e0b6 100644 --- a/src/tests/tools/simap_server/Dockerfile +++ b/src/tests/tools/simap_server/Dockerfile @@ -26,9 +26,9 @@ RUN fc-lang-install -v # Create component sub-folders, and copy content RUN mkdir -p /var/simap_server/ WORKDIR /var/simap_server -COPY ./yang ./yang +COPY ./simap_server/yang/*.yang ./yang +COPY ./simap_server/*.py ./ COPY ./startup.json ./startup.json -COPY ./*.py . # Start the service ENTRYPOINT ["python", "simap_server.py"] -- GitLab From 822ff8d4eb1245d0e6a63c3a0e681164911d0cea Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 15:44:59 +0000 Subject: [PATCH 025/367] NBI component - L3VPN: - Fixed wrong import --- src/nbi/service/ietf_l3vpn/L3VPN_Service.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nbi/service/ietf_l3vpn/L3VPN_Service.py b/src/nbi/service/ietf_l3vpn/L3VPN_Service.py index 1473356e7..2563316e0 100644 --- a/src/nbi/service/ietf_l3vpn/L3VPN_Service.py +++ b/src/nbi/service/ietf_l3vpn/L3VPN_Service.py @@ -21,8 +21,8 @@ from common.tools.context_queries.Service import get_service_by_uuid from context.client.ContextClient import ContextClient from service.client.ServiceClient import ServiceClient from typing import Dict, List -from ..tools.Authentication import HTTP_AUTH -from ..tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR, HTTP_CREATED +from .._tools.Authentication import HTTP_AUTH +from .._tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR, HTTP_CREATED from .Handlers import update_vpn from werkzeug.exceptions import UnsupportedMediaType from .YangValidator import YangValidator -- GitLab From 5da0c05c2208e06b2e868aee2f8754153dd187c2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 25 Aug 2025 17:35:01 +0000 Subject: [PATCH 026/367] SIMAP Connector: - Initial partial code backup --- .gitlab-ci.yml | 1 + manifests/simapservice.yaml | 89 ++++++++ my_deploy.sh | 3 + src/common/Constants.py | 2 + src/simap_connector/.gitlab-ci.yml | 39 ++++ src/simap_connector/Config.py | 24 +++ src/simap_connector/Dockerfile | 60 ++++++ src/simap_connector/__init__.py | 13 ++ src/simap_connector/requirements.in | 14 ++ .../service/SimapConnectorService.py | 25 +++ src/simap_connector/service/__init__.py | 13 ++ src/simap_connector/service/__main__.py | 74 +++++++ .../service/simap_updater/ObjectCache.py | 74 +++++++ .../service/simap_updater/SimapUpdater.py | 168 +++++++++++++++ .../service/simap_updater/Tools.py | 40 ++++ .../service/simap_updater/__init__.py | 13 ++ .../simap_client/RestConfClient.py | 191 ++++++++++++++++++ .../simap_updater/simap_client/SimapClient.py | 191 ++++++++++++++++++ .../simap_updater/simap_client/__init__.py | 14 ++ 
.../simap_updater/simap_client/__main__.py | 128 ++++++++++++ 20 files changed, 1176 insertions(+) create mode 100644 manifests/simapservice.yaml create mode 100644 src/simap_connector/.gitlab-ci.yml create mode 100644 src/simap_connector/Config.py create mode 100644 src/simap_connector/Dockerfile create mode 100644 src/simap_connector/__init__.py create mode 100644 src/simap_connector/requirements.in create mode 100644 src/simap_connector/service/SimapConnectorService.py create mode 100644 src/simap_connector/service/__init__.py create mode 100644 src/simap_connector/service/__main__.py create mode 100644 src/simap_connector/service/simap_updater/ObjectCache.py create mode 100644 src/simap_connector/service/simap_updater/SimapUpdater.py create mode 100644 src/simap_connector/service/simap_updater/Tools.py create mode 100644 src/simap_connector/service/simap_updater/__init__.py create mode 100644 src/simap_connector/service/simap_updater/simap_client/RestConfClient.py create mode 100644 src/simap_connector/service/simap_updater/simap_client/SimapClient.py create mode 100644 src/simap_connector/service/simap_updater/simap_client/__init__.py create mode 100644 src/simap_connector/service/simap_updater/simap_client/__main__.py diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9e0f7e485..ef2389337 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -57,6 +57,7 @@ include: # - local: '/src/e2e_orchestrator/.gitlab-ci.yml' # - local: '/src/ztp_server/.gitlab-ci.yml' # - local: '/src/osm_client/.gitlab-ci.yml' +# - local: '/src/simap_connector/.gitlab-ci.yml' # # # This should be last one: end-to-end integration tests # - local: '/src/tests/.gitlab-ci.yml' diff --git a/manifests/simapservice.yaml b/manifests/simapservice.yaml new file mode 100644 index 000000000..550805d8d --- /dev/null +++ b/manifests/simapservice.yaml @@ -0,0 +1,89 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
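+# Kubernetes Deployment and ClusterIP Service for the SIMAP Connector component
+# (gRPC on port 9090, Prometheus metrics on port 9192); the SIMAP_SERVER_* variables
+# point to the external SIMAP RESTCONF server.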
+ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simap-connectorservice +spec: + selector: + matchLabels: + app: simap-connectorservice + replicas: 1 + template: + metadata: + labels: + app: simap-connectorservice + spec: + terminationGracePeriodSeconds: 5 + containers: + - name: server + image: labs.etsi.org:5050/tfs/controller/simap_connector:latest + imagePullPolicy: Always + ports: + - containerPort: 9090 + - containerPort: 9192 + env: + - name: LOG_LEVEL + value: "INFO" + - name: SIMAP_SERVER_SCHEME + value: "http" + - name: SIMAP_SERVER_ADDRESS + value: "10.254.0.9" + - name: SIMAP_SERVER_PORT + value: "80" + - name: SIMAP_SERVER_USERNAME + value: "admin" + - name: SIMAP_SERVER_PASSWORD + value: "admin" + - name: SIMAP_DEFAULT_TOPOLOGY + value: "te" + startupProbe: + grpc: + port: 9090 + failureThreshold: 30 + periodSeconds: 1 + readinessProbe: + grpc: + port: 9090 + livenessProbe: + grpc: + port: 9090 + resources: + requests: + cpu: 250m + memory: 128Mi + limits: + cpu: 1000m + memory: 1024Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: simap-connectorservice + labels: + app: simap-connectorservice +spec: + type: ClusterIP + selector: + app: simap-connectorservice + ports: + - name: grpc + protocol: TCP + port: 9090 + targetPort: 9090 + - name: metrics + protocol: TCP + port: 9192 + targetPort: 9192 diff --git a/my_deploy.sh b/my_deploy.sh index 662dc389b..5b39a4ca1 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -86,6 +86,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" #fi +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + # Uncomment to activate Load Generator #export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" diff --git a/src/common/Constants.py b/src/common/Constants.py index 2cb7c3787..21ac64a00 100644 --- a/src/common/Constants.py +++ b/src/common/Constants.py @@ -47,6 +47,7 @@ class ServiceNameEnum(Enum): MONITORING = 'monitoring' DLT = 'dlt' NBI = 'nbi' + SIMAP_CONNECTOR = 'simap-connector' CYBERSECURITY = 'cybersecurity' INTERDOMAIN = 'interdomain' PATHCOMP = 'pathcomp' @@ -90,6 +91,7 @@ DEFAULT_SERVICE_GRPC_PORTS = { ServiceNameEnum.POLICY .value : 6060, ServiceNameEnum.MONITORING .value : 7070, ServiceNameEnum.DLT .value : 8080, + ServiceNameEnum.SIMAP_CONNECTOR .value : 9090, ServiceNameEnum.L3_CAD .value : 10001, ServiceNameEnum.L3_AM .value : 10002, ServiceNameEnum.DBSCANSERVING .value : 10008, diff --git a/src/simap_connector/.gitlab-ci.yml b/src/simap_connector/.gitlab-ci.yml new file mode 100644 index 000000000..02e15a16c --- /dev/null +++ b/src/simap_connector/.gitlab-ci.yml @@ -0,0 +1,39 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# build, tag and push the Docker image to the gitlab registry +build simap_connector: + variables: + IMAGE_NAME: 'simap_connector' # name of the microservice + IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) + stage: build + before_script: + - docker image prune --force + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$IMAGE_NAME/Dockerfile . + - docker tag "$IMAGE_NAME:$IMAGE_TAG" "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + - docker push "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" + after_script: + - docker image prune --force + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/$IMAGE_NAME/**/*.{py,in,yml} + - src/$IMAGE_NAME/Dockerfile + - src/$IMAGE_NAME/tests/*.py + - src/$IMAGE_NAME/tests/Dockerfile + - manifests/${IMAGE_NAME}service.yaml + - .gitlab-ci.yml diff --git a/src/simap_connector/Config.py b/src/simap_connector/Config.py new file mode 100644 index 000000000..23958037f --- /dev/null +++ b/src/simap_connector/Config.py @@ -0,0 +1,24 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from common.Constants import DEFAULT_TOPOLOGY_NAME +from common.Settings import get_setting + +SIMAP_SERVER_SCHEME = str(get_setting('SIMAP_SERVER_SCHEME', default='http' )) +SIMAP_SERVER_ADDRESS = str(get_setting('SIMAP_SERVER_ADDRESS', default='127.0.0.1' )) +SIMAP_SERVER_PORT = int(get_setting('SIMAP_SERVER_PORT', default='80' )) +SIMAP_SERVER_USERNAME = str(get_setting('SIMAP_SERVER_USERNAME', default='admin' )) +SIMAP_SERVER_PASSWORD = str(get_setting('SIMAP_SERVER_PASSWORD', default='admin' )) +SIMAP_DEFAULT_TOPOLOGY = str(get_setting('SIMAP_DEFAULT_TOPOLOGY', default=DEFAULT_TOPOLOGY_NAME)) diff --git a/src/simap_connector/Dockerfile b/src/simap_connector/Dockerfile new file mode 100644 index 000000000..faaaf9947 --- /dev/null +++ b/src/simap_connector/Dockerfile @@ -0,0 +1,60 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
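+# Container image for the SIMAP Connector: installs the common TFS Python
+# requirements, generates the gRPC stubs from the proto files, and copies the
+# simap_connector sources into the image.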
+ +FROM python:3.9-slim + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/simap_connector +WORKDIR /var/teraflow/simap_connector +COPY src/simap_connector/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/simap_connector/. simap_connector/ + +# Start the service +ENTRYPOINT ["python", "-m", "simap_connector.service"] diff --git a/src/simap_connector/__init__.py b/src/simap_connector/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/simap_connector/requirements.in b/src/simap_connector/requirements.in new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/simap_connector/requirements.in @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/simap_connector/service/SimapConnectorService.py b/src/simap_connector/service/SimapConnectorService.py new file mode 100644 index 000000000..8555f3f9f --- /dev/null +++ b/src/simap_connector/service/SimapConnectorService.py @@ -0,0 +1,25 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.Constants import ServiceNameEnum +from common.Settings import get_service_port_grpc +from common.tools.service.GenericGrpcService import GenericGrpcService + +class SimapConnectorService(GenericGrpcService): + def __init__(self, cls_name: str = __name__) -> None: + port = get_service_port_grpc(ServiceNameEnum.SIMAP_CONNECTOR) + super().__init__(port, cls_name=cls_name) + + def install_servicers(self): + pass diff --git a/src/simap_connector/service/__init__.py b/src/simap_connector/service/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/service/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py new file mode 100644 index 000000000..6f8bdbc87 --- /dev/null +++ b/src/simap_connector/service/__main__.py @@ -0,0 +1,74 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
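+# Entry point of the SIMAP Connector: waits for the Context service settings,
+# exposes Prometheus metrics, and runs the gRPC service plus the SimapUpdater
+# until a termination signal is received.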
+ +import logging, signal, sys, threading +from prometheus_client import start_http_server +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, + get_log_level, get_metrics_port, wait_for_environment_variables +) +from .simap_updater.SimapUpdater import SimapUpdater +from .SimapConnectorService import SimapConnectorService + +TERMINATE = threading.Event() + +LOG_LEVEL = get_log_level() +logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +logging.getLogger('RestConfClient').setLevel(logging.WARN) + +LOGGER = logging.getLogger(__name__) + + +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + TERMINATE.set() + + +def main(): + wait_for_environment_variables([ + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + ]) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + LOGGER.info('Starting...') + + # Start metrics server + metrics_port = get_metrics_port() + start_http_server(metrics_port) + + # Starting service + grpc_service = SimapConnectorService() + grpc_service.start() + + simap_updater = SimapUpdater(TERMINATE) + simap_updater.start() + + LOGGER.info('Running...') + # Wait for Ctrl+C or termination signal + while not TERMINATE.wait(timeout=1.0): pass + + LOGGER.info('Terminating...') + simap_updater.stop() + grpc_service.stop() + + LOGGER.info('Bye') + return 0 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py new file mode 100644 index 000000000..c8802f8c9 --- /dev/null +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -0,0 +1,74 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
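+# In-memory cache of Context objects (topologies, devices, links), keyed by
+# entity type and UUID, to avoid repeated lookups against the Context service.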
+ + +import logging +from enum import Enum +from typing import Any, Dict, Optional, Tuple +from common.tools.context_queries.Device import get_device +from common.tools.context_queries.Link import get_link +from common.tools.context_queries.Topology import get_topology +from context.client.ContextClient import ContextClient + + +LOGGER = logging.getLogger(__name__) + + +class CachedEntities(Enum): + TOPOLOGY = 'topology' + DEVICE = 'device' + LINK = 'link' + + +class ObjectCache: + def __init__(self, context_client : ContextClient): + self._context_client = context_client + self._object_cache : Dict[Tuple[str, str], Any] = dict() + + def get(self, entity : CachedEntities, object_uuid : str) -> Optional[Any]: + object_key = (entity.value, object_uuid) + if object_key in self._object_cache: + return self._object_cache[object_key] + return self._update(entity, object_uuid) + + def _retrieve( + self, entity : CachedEntities, entity_uuid : str + ) -> Optional[Any]: + if entity == CachedEntities.TOPOLOGY: + return get_topology(self._context_client, entity_uuid, rw_copy=False) + if entity == CachedEntities.DEVICE: + return get_device( + self._context_client, entity_uuid, rw_copy=False, include_endpoints=True, + include_components=False, include_config_rules=False, + ) + if entity == CachedEntities.LINK: + return get_link(self._context_client, entity_uuid, rw_copy=False) + MSG = 'Not Supported ({:s}, {:s})' + LOGGER.warning(MSG.format(str(entity.value).title(), str(entity_uuid))) + return None + + def _update(self, entity : CachedEntities, object_uuid : str) -> Optional[Any]: + object_inst = self._retrieve(entity, object_uuid) + if object_inst is None: + MSG = 'Not Found ({:s}, {:s})' + LOGGER.warning(MSG.format(str(entity).title(), str(object_uuid))) + return None + + object_key = (entity.value, object_uuid) + self._object_cache[object_key] = object_inst + return object_inst + + def delete(self, entity : CachedEntities, object_uuid : str) -> None: + object_key = (entity.value, object_uuid) + self._object_cache.pop(object_key, None) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py new file mode 100644 index 000000000..788e29d2e --- /dev/null +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -0,0 +1,168 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
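+# Collects Context topology/device/link events and mirrors topology changes
+# into the external SIMAP RESTCONF server through SimapClient.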
+
+
+import logging, queue, threading
+from typing import Any, Optional
+from common.proto.context_pb2 import DeviceEvent, Empty, TopologyEvent
+from common.tools.context_queries.Device import get_device
+from common.tools.context_queries.Link import get_link
+
+from common.tools.grpc.BaseEventCollector import BaseEventCollector
+from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.DeviceTypes import DeviceTypeEnum
+from context.client.ContextClient import ContextClient
+from simap_connector.service.simap_updater.ObjectCache import CachedEntities
+from .simap_client.RestConfClient import RestConfClient
+from .simap_client.SimapClient import SimapClient
+from .ObjectCache import ObjectCache
+
+from simap_connector.Config import (
+    SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT,
+    SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD,
+)
+
+
+LOGGER = logging.getLogger(__name__)
+RESTCONF_LOGGER = logging.getLogger(__name__ + '.RestConfClient')
+
+
+class EventDispatcher(BaseEventDispatcher):
+    def __init__(
+        self, events_queue : queue.PriorityQueue,
+        context_client : ContextClient,
+        terminate : Optional[threading.Event] = None
+    ) -> None:
+        super().__init__(events_queue, terminate)
+        self._context_client = context_client
+        self._object_cache = ObjectCache(self._context_client)
+        self._restconf_client = RestConfClient(
+            scheme=SIMAP_SERVER_SCHEME, address=SIMAP_SERVER_ADDRESS,
+            port=SIMAP_SERVER_PORT, username=SIMAP_SERVER_USERNAME,
+            password=SIMAP_SERVER_PASSWORD, logger=RESTCONF_LOGGER,
+        )
+        self._simap_client = SimapClient(self._restconf_client)
+
+    def dispatch_topology_create(self, topology_event : TopologyEvent) -> None:
+        MSG = 'Processing Topology Event: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event)))
+
+        topology_uuid = topology_event.topology_id.topology_uuid.uuid
+        topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid)
+        topology_name = topology.name
+        self._simap_client.network(topology_name).create()
+
+        MSG = 'Topology Created: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event)))
+
+    def dispatch_topology_update(self, topology_event : TopologyEvent) -> None:
+        MSG = 'Processing Topology Event: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event)))
+
+        topology_uuid = topology_event.topology_id.topology_uuid.uuid
+        topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid)
+        topology_name = topology.name
+        self._simap_client.network(topology_name).create()
+
+        MSG = 'Topology Updated: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event)))
+
+    def dispatch_topology_remove(self, topology_event : TopologyEvent) -> None:
+        MSG = 'Processing Topology Event: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event)))
+
+        topology_uuid = topology_event.topology_id.topology_uuid.uuid
+        topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid)
+        topology_name = topology.name
+        self._simap_client.network(topology_name).delete()
+
+        self._object_cache.delete(CachedEntities.TOPOLOGY, topology_uuid)
+
+        MSG = 'Topology Removed: {:s}'
+        LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event)))
+
+    #def dispatch_device_create(self, device_event : DeviceEvent) -> None:
+    #    MSG = 'Processing Device Create: {:s}'
+    #    LOGGER.info(MSG.format(grpc_message_to_json_string(device_event)))
+    #    # TODO: complete device handling; 'get_topology' is not imported and
+    #    # 'device' is not defined in this scope yet
+    #    topology_uuid = topology_event.topology_id.topology_uuid.uuid
+    #    topology = get_topology(
+    #        self._context_client, topology_uuid, rw_copy=False,
+    #        include_endpoints=False, include_config_rules=True,
+    #        include_components=False
+    #    )
+    #    device_type = device.device_type
+
+    #    tfs_ctrl_settings = get_tfs_controller_settings(
+    #        self._context_client, device_event
+    #    )
+    #    if tfs_ctrl_settings is None: return
+    #    self._subscriptions.add_subscription(tfs_ctrl_settings)
+
+    #def dispatch_device_update(self, device_event : DeviceEvent) -> None:
+    #    MSG = 'Processing Device Update: {:s}'
+    #    LOGGER.info(MSG.format(grpc_message_to_json_string(device_event)))
+    #    tfs_ctrl_settings = get_tfs_controller_settings(
+    #        self._context_client, device_event
+    #    )
+    #    if tfs_ctrl_settings is None: return
+    #    self._subscriptions.add_subscription(tfs_ctrl_settings)
+
+    #def dispatch_device_remove(self, device_event : DeviceEvent) -> None:
+    #    MSG = 'Processing Device Remove: {:s}'
+    #    LOGGER.info(MSG.format(grpc_message_to_json_string(device_event)))
+    #    device_uuid = device_event.device_id.device_uuid.uuid
+    #    self._subscriptions.remove_subscription(device_uuid)
+
+    def dispatch(self, event : Any) -> None:
+        MSG = 'Unexpected Event: {:s}'
+        LOGGER.warning(MSG.format(grpc_message_to_json_string(event)))
+
+
+class SimapUpdater:
+    def __init__(self, terminate : threading.Event) -> None:
+        self._context_client = ContextClient()
+
+        self._event_collector = BaseEventCollector(terminate=terminate)
+        self._event_collector.install_collector(
+            self._context_client.GetTopologyEvents, Empty(), log_events_received=True
+        )
+        self._event_collector.install_collector(
+            self._context_client.GetDeviceEvents, Empty(), log_events_received=True
+        )
+        self._event_collector.install_collector(
+            self._context_client.GetLinkEvents, Empty(), log_events_received=True
+        )
+
+        self._event_dispatcher = EventDispatcher(
+            self._event_collector.get_events_queue(), self._context_client,
+            terminate=terminate
+        )
+
+    def start(self) -> None:
+        self._context_client.connect()
+        self._event_dispatcher.start()
+        self._event_collector.start()
+
+    def stop(self):
+        self._event_collector.stop()
+        self._event_dispatcher.stop()
+        self._context_client.close()
diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py
new file mode 100644
index 000000000..4ec1c83bc
--- /dev/null
+++ b/src/simap_connector/service/simap_updater/Tools.py
@@ -0,0 +1,40 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
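+# Helpers mapping the gRPC event-type constants of Context events onto a Python enum.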
+
+
+import enum
+from typing import Union
+from common.proto.context_pb2 import (
+    EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE,
+    DeviceEvent, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent
+)
+from common.tools.grpc.Tools import grpc_message_to_json_string
+
+
+class EventTypeEnum(enum.IntEnum):
+    CREATE = EVENTTYPE_CREATE
+    UPDATE = EVENTTYPE_UPDATE
+    REMOVE = EVENTTYPE_REMOVE
+
+
+EVENT_TYPE = Union[DeviceEvent, LinkEvent, TopologyEvent, ServiceEvent, SliceEvent]
+
+
+def get_event_type(event : EVENT_TYPE) -> EventTypeEnum:
+    int_event_type = event.event.event_type
+    enum_event_type = EventTypeEnum._value2member_map_.get(int_event_type)
+    if enum_event_type is None:
+        MSG = 'Unsupported EventType({:s}) in Event({:s})'
+        str_event = grpc_message_to_json_string(event)
+        raise Exception(MSG.format(str(int_event_type), str_event))
+    return enum_event_type
diff --git a/src/simap_connector/service/simap_updater/__init__.py b/src/simap_connector/service/simap_updater/__init__.py
new file mode 100644
index 000000000..7363515f0
--- /dev/null
+++ b/src/simap_connector/service/simap_updater/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/simap_connector/service/simap_updater/simap_client/RestConfClient.py b/src/simap_connector/service/simap_updater/simap_client/RestConfClient.py
new file mode 100644
index 000000000..b7c057a70
--- /dev/null
+++ b/src/simap_connector/service/simap_updater/simap_client/RestConfClient.py
@@ -0,0 +1,191 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
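+# Minimal RESTCONF client: discovers the server root via /.well-known/host-meta
+# and wraps GET/POST/PUT/PATCH/DELETE requests with optional HTTP basic authentication.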
+ +import enum, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Any, Dict, Optional, Set + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], # 200 - OK + requests.codes['CREATED' ], # 201 - Created + requests.codes['ACCEPTED' ], # 202 - Accepted + requests.codes['NO_CONTENT'], # 204 - No Content +} + +def compose_basic_auth( + username : Optional[str] = None, password : Optional[str] = None +) -> Optional[HTTPBasicAuth]: + if username is None or password is None: return None + return HTTPBasicAuth(username, password) + +class SchemeEnum(enum.Enum): + HTTP = 'http' + HTTPS = 'https' + +def check_scheme(scheme : str) -> str: + str_scheme = str(scheme).lower() + enm_scheme = SchemeEnum._value2member_map_[str_scheme] + return enm_scheme.value + +HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' +RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' + +class RestConfClient: + def __init__( + self, address : str, port : int = 8080, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, + logger : Optional[logging.Logger] = None + ) -> None: + self._address = address + self._port = int(port) + self._scheme = check_scheme(scheme) + self._auth = compose_basic_auth(username=username, password=password) + self._base_url = '' + self._timeout = int(timeout) + self._verify_certs = verify_certs + self._allow_redirects = allow_redirects + self._logger = logger + + self._discover_base_url() + + def _discover_base_url(self) -> None: + host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) + host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) + + links = host_meta.get('links') + if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') + if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') + if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') + + link = links[0] + if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') + + rel = link.get('rel') + if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') + if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') + if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') + + href = link.get('href') + if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') + if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') + + self._base_url = str(href + '/data').replace('//', '/') + + def _log_msg_request( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + log_level : int = logging.INFO + ) -> str: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + if self._logger is not None: self._logger.log(log_level, msg) + return msg + + def _log_msg_check_reply( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + reply : requests.Response, expected_status_codes : Set[int], + log_level : int = logging.INFO + ) -> str: + msg = 'Reply: {:s}'.format(str(reply.text)) + if self._logger is not None: 
self._logger.log(log_level, msg) + http_status_code = reply.status_code + if http_status_code in expected_status_codes: return msg + MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' + msg = MSG.format( + str(method.value).upper(), str(request_url), str(body), + str(http_status_code), str(reply.text) + ) + self._logger.error(msg) + raise Exception(msg) + + def _do_rest_request( + self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = EXPECTED_STATUS_CODES + ) -> Optional[Any]: + candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) + if endpoint.lower().startswith(candidate_schemes): + request_url = endpoint.lstrip('/') + else: + endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') + request_url = '{:s}://{:s}:{:d}/{:s}'.format( + self._scheme, self._address, self._port, endpoint.lstrip('/') + ) + self._log_msg_request(method, request_url, body) + try: + headers = {'accept': 'application/json'} + reply = requests.request( + method.value, request_url, headers=headers, json=body, + auth=self._auth, verify=self._verify_certs, timeout=self._timeout, + allow_redirects=self._allow_redirects + ) + except Exception as e: + MSG = 'Request failed. method={:s} url={:s} body={:s}' + msg = MSG.format(str(method.value).upper(), request_url, str(body)) + self._logger.exception(msg) + raise Exception(msg) from e + self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) + if reply.content and len(reply.content) > 0: return reply.json() + return None + + def get( + self, endpoint : str, + expected_status_codes : Set[int] = {requests.codes['OK']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.GET, endpoint, + expected_status_codes=expected_status_codes + ) + + def post( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.POST, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def put( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PUT, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def patch( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PATCH, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def delete( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.DELETE, endpoint, body=body, + expected_status_codes=expected_status_codes + ) diff --git a/src/simap_connector/service/simap_updater/simap_client/SimapClient.py b/src/simap_connector/service/simap_updater/simap_client/SimapClient.py new file mode 100644 index 000000000..26713ac5e --- /dev/null +++ b/src/simap_connector/service/simap_updater/simap_client/SimapClient.py @@ -0,0 +1,191 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Dict, List, Tuple +from .RestConfClient import RestConfClient + + +class TerminationPoint: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tp_id = tp_id + + def create(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network-topology:termination-point'][0] + + def delete(self) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + self._restconf_client.delete(endpoint) + +class Node: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tps : Dict[str, TerminationPoint] = dict() + + def termination_points(self) -> List[Dict]: + tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) + return tps['ietf-network-topology:termination-point'].get('termination-point', list()) + + def termination_point(self, tp_id : str) -> TerminationPoint: + _tp = self._tps.get(tp_id) + if _tp is not None: return _tp + _tp = TerminationPoint(self._restconf_client, self._network_id, self._node_id, tp_id) + return self._tps.setdefault(tp_id, _tp) + + def create( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + 
self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network:node'][0] + + def delete(self) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + +class Link: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link : Dict = self._restconf_client.get(endpoint) + return link['ietf-network-topology:link'][0] + + def delete(self) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + +class Network: + ENDPOINT_NO_ID = '/ietf-network:networks' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._nodes : Dict[str, Node] = dict() + self._links : Dict[str, Link] = dict() + + def nodes(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Node.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('node', list()) + + def links(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Link.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('ietf-network-topology:link', list()) + + def node(self, node_id : str) -> Node: + _node = self._nodes.get(node_id) + if _node is not None: return _node + _node = Node(self._restconf_client, self._network_id, node_id) + return self._nodes.setdefault(node_id, _node) + + def link(self, link_id : str) -> Link: + _link = self._links.get(link_id) + if _link is not None: return _link + _link = Link(self._restconf_client, self._network_id, link_id) + return self._links.setdefault(link_id, _link) + + def create(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + networks : Dict = 
self._restconf_client.get(endpoint) + return networks['ietf-network:network'][0] + + def delete(self) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + self._restconf_client.delete(endpoint) + + +class SimapClient: + def __init__(self, restconf_client : RestConfClient) -> None: + self._restconf_client = restconf_client + self._networks : Dict[str, Network] = dict() + + def networks(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Network.ENDPOINT_NO_ID) + return reply['ietf-network:networks'].get('network', list()) + + def network(self, network_id : str) -> Network: + _network = self._networks.get(network_id) + if _network is not None: return _network + _network = Network(self._restconf_client, network_id) + return self._networks.setdefault(network_id, _network) diff --git a/src/simap_connector/service/simap_updater/simap_client/__init__.py b/src/simap_connector/service/simap_updater/simap_client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/simap_connector/service/simap_updater/simap_client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/simap_connector/service/simap_updater/simap_client/__main__.py b/src/simap_connector/service/simap_updater/simap_client/__main__.py new file mode 100644 index 000000000..3aecad42e --- /dev/null +++ b/src/simap_connector/service/simap_updater/simap_client/__main__.py @@ -0,0 +1,128 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
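+# Standalone demo: populates a SIMAP server at 127.0.0.1 with a sample
+# multi-layer topology ('te', 'simap-trans', 'simap-aggnet', 'simap-e2e').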
+ + +import json, logging +from .RestConfClient import RestConfClient +from .SimapClient import SimapClient + +logging.basicConfig(level=logging.INFO) +LOGGER = logging.getLogger(__name__) + +logging.getLogger('RestConfClient').setLevel(logging.WARN) + +def main() -> None: + restconf_client = RestConfClient('127.0.0.1', logger=logging.getLogger('RestConfClient')) + simap_client = SimapClient(restconf_client) + + te_topo = simap_client.network('te') + te_topo.create() + + te_topo.node('ONT1').create(termination_point_ids=['200', '500']) + te_topo.node('ONT2').create(termination_point_ids=['200', '500']) + te_topo.node('OLT' ).create(termination_point_ids=['200', '201', '500', '501']) + te_topo.link('L1').create('ONT1', '500', 'OLT', '200') + te_topo.link('L2').create('ONT2', '500', 'OLT', '201') + + te_topo.node('PE1').create(termination_point_ids=['200', '500', '501']) + te_topo.node('P1' ).create(termination_point_ids=['500', '501']) + te_topo.node('P2' ).create(termination_point_ids=['500', '501']) + te_topo.node('PE2').create(termination_point_ids=['200', '500', '501']) + te_topo.link('L5' ).create('PE1', '500', 'P1', '500') + te_topo.link('L6' ).create('PE1', '501', 'P2', '500') + te_topo.link('L9' ).create('P1', '501', 'PE2', '500') + te_topo.link('L10').create('P2', '501', 'PE2', '501') + + te_topo.node('OA' ).create(termination_point_ids=['200', '500', '501']) + te_topo.node('OTN1').create(termination_point_ids=['500', '501']) + te_topo.node('OTN2').create(termination_point_ids=['500', '501']) + te_topo.node('OE' ).create(termination_point_ids=['200', '500', '501']) + te_topo.link('L7' ).create('OA', '500', 'OTN1', '500') + te_topo.link('L8' ).create('OA', '501', 'OTN2', '500') + te_topo.link('L11' ).create('OTN1', '501', 'OE', '500') + te_topo.link('L12' ).create('OTN2', '501', 'OE', '501') + + te_topo.link('L3').create('OLT', '500', 'PE1', '200') + te_topo.link('L4').create('OLT', '501', 'OA', '200') + + te_topo.node('POP1').create(termination_point_ids=['200', '201', '500']) + te_topo.link('L13').create('PE2', '200', 'POP1', '500') + + te_topo.node('POP2').create(termination_point_ids=['200', '201', '500']) + te_topo.link('L14').create('OE', '200', 'POP2', '500') + + + + simap_trans = simap_client.network('simap-trans') + simap_trans.create(supporting_network_ids=['te']) + + site_1 = simap_trans.node('site1') + site_1.create(supporting_node_ids=[('te', 'PE1')]) + site_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE1', '200')]) + site_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE1', '500')]) + site_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE1', '501')]) + + site_2 = simap_trans.node('site2') + site_2.create(supporting_node_ids=[('te', 'PE2')]) + site_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE2', '200')]) + site_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE2', '500')]) + site_2.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE2', '501')]) + + simap_trans.link('Trans-L1').create('site1', '500', 'site2', '500', supporting_link_ids=[('te', 'L5'), ('te', 'L9')]) + + + + + simap_aggnet = simap_client.network('simap-aggnet') + simap_aggnet.create(supporting_network_ids=['te', 'simap-trans']) + + sdp_1 = simap_aggnet.node('sdp1') + sdp_1.create(supporting_node_ids=[('te', 'OLT')]) + sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'OLT', '200')]) + 
sdp_1.termination_point('201').create(supporting_termination_point_ids=[('te', 'OLT', '201')]) + sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'OLT', '500')]) + sdp_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'OLT', '501')]) + + sdp_2 = simap_aggnet.node('sdp2') + sdp_2.create(supporting_node_ids=[('te', 'POP1')]) + sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) + sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) + sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + + simap_aggnet.link('AggNet-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L3'), ('simap-trans', 'Trans-L1'), ('te', 'L13')]) + + + + + simap_e2e = simap_client.network('simap-e2e') + simap_e2e.create(supporting_network_ids=['te', 'simap-trans']) + + sdp_1 = simap_e2e.node('sdp1') + sdp_1.create(supporting_node_ids=[('te', 'ONT1')]) + sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'ONT1', '200')]) + sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'ONT1', '500')]) + + sdp_2 = simap_e2e.node('sdp2') + sdp_2.create(supporting_node_ids=[('te', 'POP1')]) + sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) + sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) + sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + + simap_e2e.link('E2E-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L1'), ('simap-aggnet', 'AggNet-L1')]) + + + print('networks=', json.dumps(simap_client.networks())) + +if __name__ == '__main__': + main() -- GitLab From 2ea1cf77d5ef3e216302969ca7a6f6454c8f24ef Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 15:41:39 +0000 Subject: [PATCH 027/367] Tests - Tools - SIMAP Server - Arranged file structure - Corrected dockerization - Corrected test scripts - Updated README.md --- src/tests/tools/simap_server/Dockerfile | 47 +++++++++--- src/tests/tools/simap_server/README.md | 37 ++++------ src/tests/tools/simap_server/deploy.sh | 2 +- src/tests/tools/simap_server/requirements.in | 25 +++++++ src/tests/tools/simap_server/run_client.sh | 16 +++++ src/tests/tools/simap_server/run_server.sh | 3 - .../{redeploy.sh => run_server_gunicorn.sh} | 18 +---- .../simap_server/run_server_standalone.sh | 19 +++++ .../simap_server/simap_client/__main__.py | 5 +- .../simap_server/simap_server/__main__.py | 63 +++------------- .../tools/simap_server/simap_server/app.py | 72 +++++++++++++++++++ src/tests/tools/simap_server/tests.sh | 66 ----------------- .../yang/ietf-inet-types.yang | 0 .../yang/ietf-network-topology.yang | 0 .../{simap_server => }/yang/ietf-network.yang | 0 .../yang/simap-telemetry.yang | 0 .../{simap_server => }/yang/simap.txt | 0 17 files changed, 199 insertions(+), 174 deletions(-) create mode 100644 src/tests/tools/simap_server/requirements.in delete mode 100755 src/tests/tools/simap_server/run_server.sh rename src/tests/tools/simap_server/{redeploy.sh => run_server_gunicorn.sh} (67%) create mode 100755 src/tests/tools/simap_server/run_server_standalone.sh create mode 100644 src/tests/tools/simap_server/simap_server/app.py delete mode 100755 src/tests/tools/simap_server/tests.sh rename src/tests/tools/simap_server/{simap_server => }/yang/ietf-inet-types.yang 
(100%) rename src/tests/tools/simap_server/{simap_server => }/yang/ietf-network-topology.yang (100%) rename src/tests/tools/simap_server/{simap_server => }/yang/ietf-network.yang (100%) rename src/tests/tools/simap_server/{simap_server => }/yang/simap-telemetry.yang (100%) rename src/tests/tools/simap_server/{simap_server => }/yang/simap.txt (100%) diff --git a/src/tests/tools/simap_server/Dockerfile b/src/tests/tools/simap_server/Dockerfile index f2c56e0b6..973d56a8f 100644 --- a/src/tests/tools/simap_server/Dockerfile +++ b/src/tests/tools/simap_server/Dockerfile @@ -14,21 +14,48 @@ FROM python:3.9-slim +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install git build-essential cmake libpcre2-dev python3-dev python3-cffi && \ + rm -rf /var/lib/apt/lists/* + +# Download, build and install libyang. Note that the APT package is outdated +# - Ref: https://github.com/CESNET/libyang +# - Ref: https://github.com/CESNET/libyang-python/ +RUN mkdir -p /var/libyang +RUN git clone https://github.com/CESNET/libyang.git /var/libyang +WORKDIR /var/libyang +RUN git fetch +RUN git checkout v2.1.148 +RUN mkdir -p /var/libyang/build +WORKDIR /var/libyang/build +RUN cmake -D CMAKE_BUILD_TYPE:String="Release" .. +RUN make +RUN make install +RUN ldconfig + # Set Python to show logs as they occur ENV PYTHONUNBUFFERED=0 -# Get Python dependencies +# Get generic Python packages RUN python3 -m pip install --upgrade pip RUN python3 -m pip install --upgrade setuptools wheel -RUN python3 -m pip install https://github.com/freeconf/lang/releases/download/v0.1.0-alpha/freeconf-0.1.0-py3-none-any.whl -RUN fc-lang-install -v - -# Create component sub-folders, and copy content -RUN mkdir -p /var/simap_server/ -WORKDIR /var/simap_server -COPY ./simap_server/yang/*.yang ./yang -COPY ./simap_server/*.py ./ +RUN python3 -m pip install --upgrade pip-tools + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/simap_server/ +WORKDIR /var/teraflow/simap_server/ +COPY ./requirements.in ./requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +COPY ./yang/*.yang ./yang/ +COPY ./simap_server/*.py ./simap_server/ COPY ./startup.json ./startup.json +# Configure Flask for production +ENV FLASK_ENV=production + # Start the service -ENTRYPOINT ["python", "simap_server.py"] +ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "simap_server.app:app"] diff --git a/src/tests/tools/simap_server/README.md b/src/tests/tools/simap_server/README.md index 7087fe061..bdea3b5bf 100644 --- a/src/tests/tools/simap_server/README.md +++ b/src/tests/tools/simap_server/README.md @@ -1,34 +1,25 @@ -# Mock QKD Node +# RESTCONF/SIMAP Server -This Mock implements very basic support for the software-defined QKD node information models specified in ETSI GS QKD 015 V2.1.1. +This server implements a basic RESTCONF Server that can, in principle, load any YANG data model. +In this case, it is set up as a SIMAP Server based on the IETF Network Topology models plus custom SIMAP Telemetry extensions. -The aim of this mock is to enable testing the TFS QKD Framework with an emulated data plane.
- -## Build the Mock QKD Node Docker image +## Build the RESTCONF/SIMAP Server Docker image ```bash ./build.sh ``` -## Run the Mock QKD Node as a container: +## Deploy the RESTCONF/SIMAP Server ```bash -docker network create --driver bridge --subnet=172.254.252.0/24 --gateway=172.254.252.254 tfs-qkd-net-mgmt - -docker run --name qkd-node-01 --detach --publish 80:80 \ - --network=tfs-qkd-net-mgmt --ip=172.254.252.101 \ - --env "DATA_FILE_PATH=/var/teraflow/mock-qkd-node/data/database.json" \ - --volume "$PWD/src/tests/mock-qkd-node/data/database-01.json:/var/teraflow/mock-qkd-node/data/database.json" \ - mock-qkd-node:test +./deploy.sh +``` -docker run --name qkd-node-02 --detach --publish 80:80 \ - --network=tfs-qkd-net-mgmt --ip=172.254.252.102 \ - --env "DATA_FILE_PATH=/var/teraflow/mock-qkd-node/data/database.json" \ - --volume "$PWD/src/tests/mock-qkd-node/data/database-02.json:/var/teraflow/mock-qkd-node/data/database.json" \ - mock-qkd-node:test +## Run the RESTCONF/SIMAP Client for testing: +```bash +./run_client.sh +``` -docker run --name qkd-node-03 --detach --publish 80:80 \ - --network=tfs-qkd-net-mgmt --ip=172.254.252.103 \ - --env "DATA_FILE_PATH=/var/teraflow/mock-qkd-node/data/database.json" \ - --volume "$PWD/src/tests/mock-qkd-node/data/database-03.json:/var/teraflow/mock-qkd-node/data/database.json" \ - mock-qkd-node:test +## Destroy the RESTCONF/SIMAP Server +```bash +./destroy.sh ``` diff --git a/src/tests/tools/simap_server/deploy.sh b/src/tests/tools/simap_server/deploy.sh index f13c56379..71bbeb041 100755 --- a/src/tests/tools/simap_server/deploy.sh +++ b/src/tests/tools/simap_server/deploy.sh @@ -17,7 +17,7 @@ docker rm --force simap-server # Create SIMAP Server -docker run --detach --name simap-server --network host simap-server:test +docker run --detach --name simap-server --publish 8080:8080 simap-server:test sleep 2 diff --git a/src/tests/tools/simap_server/requirements.in b/src/tests/tools/simap_server/requirements.in new file mode 100644 index 000000000..17155ed58 --- /dev/null +++ b/src/tests/tools/simap_server/requirements.in @@ -0,0 +1,25 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cryptography==39.0.1 +eventlet==0.39.0 +Flask-HTTPAuth==4.5.0 +Flask-RESTful==0.3.9 +Flask==2.1.3 +gunicorn==23.0.0 +jsonschema==4.4.0 +libyang==2.8.4 +pyopenssl==23.0.0 +requests==2.27.1 +werkzeug==2.3.7 diff --git a/src/tests/tools/simap_server/run_client.sh b/src/tests/tools/simap_server/run_client.sh index 5761bb37c..518deb462 100755 --- a/src/tests/tools/simap_server/run_client.sh +++ b/src/tests/tools/simap_server/run_client.sh @@ -1,3 +1,19 @@ #!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) python -m simap_client diff --git a/src/tests/tools/simap_server/run_server.sh b/src/tests/tools/simap_server/run_server.sh deleted file mode 100755 index 20901314b..000000000 --- a/src/tests/tools/simap_server/run_server.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -python -m simap_server diff --git a/src/tests/tools/simap_server/redeploy.sh b/src/tests/tools/simap_server/run_server_gunicorn.sh similarity index 67% rename from src/tests/tools/simap_server/redeploy.sh rename to src/tests/tools/simap_server/run_server_gunicorn.sh index 298d23227..be9c62fcb 100755 --- a/src/tests/tools/simap_server/redeploy.sh +++ b/src/tests/tools/simap_server/run_server_gunicorn.sh @@ -13,20 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Cleanup -docker rm --force simap-server - -# Build +# Make folder containing the script the root folder for its execution cd $(dirname $0) -docker buildx build -t simap-server:test -f Dockerfile . - -# Create SIMAP Server -docker run --detach --name simap-server --network host simap-server:test - -sleep 2 - -# Dump SIMAP Server Docker container -docker ps -a -# Follow logs -docker logs simap-server --follow +export FLASK_ENV=development +gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG simap_server.app:app diff --git a/src/tests/tools/simap_server/run_server_standalone.sh b/src/tests/tools/simap_server/run_server_standalone.sh new file mode 100755 index 000000000..d2580f41d --- /dev/null +++ b/src/tests/tools/simap_server/run_server_standalone.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
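+
+# Runs the SIMAP Server with the Flask built-in development server
+# (see simap_server/__main__.py); run_server_gunicorn.sh and the Docker
+# image use gunicorn with an eventlet worker instead.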
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +python -m simap_server diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py index 3aecad42e..77f79aa6c 100644 --- a/src/tests/tools/simap_server/simap_client/__main__.py +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -23,7 +23,10 @@ LOGGER = logging.getLogger(__name__) logging.getLogger('RestConfClient').setLevel(logging.WARN) def main() -> None: - restconf_client = RestConfClient('127.0.0.1', logger=logging.getLogger('RestConfClient')) + restconf_client = RestConfClient( + '127.0.0.1', port=8080, + logger=logging.getLogger('RestConfClient') + ) simap_client = SimapClient(restconf_client) te_topo = simap_client.network('te') diff --git a/src/tests/tools/simap_server/simap_server/__main__.py b/src/tests/tools/simap_server/simap_server/__main__.py index 59fbe17ba..2c84d92ef 100644 --- a/src/tests/tools/simap_server/simap_server/__main__.py +++ b/src/tests/tools/simap_server/simap_server/__main__.py @@ -13,61 +13,14 @@ # limitations under the License. -import json, logging -from flask import Flask -from flask_restful import Api -from .Dispatch import RestConfDispatch -from .HostMeta import HostMeta -from .YangHandler import YangHandler +from .app import app -RESTCONF_PREFIX = '/restconf' -SECRET_KEY = '28dfce787f4d2dd9e2f7462ce493d3c6da46864d83e67f6b4f4765398c4155ce' -BIND_ADDRESS = '0.0.0.0' -BIND_PORT = 8080 - -YANG_SEARCH_PATH = './simap_server/yang' -YANG_MODULE_NAMES = [ - 'ietf-inet-types', - 'simap-telemetry', - 'ietf-network-topology', - 'ietf-network', -] - -STARTUP_FILE = './startup.json' -with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: - YANG_STARTUP_DATA = json.loads(fp.read()) - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - -def main() -> None: - yang_handler = YangHandler( - YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA - ) - restconf_paths = yang_handler.get_module_paths() - - app = Flask(__name__) - app.config['SECRET_KEY'] = SECRET_KEY - - api = Api(app) - api.add_resource( - HostMeta, - '/.well-known/host-meta', - resource_class_args=(RESTCONF_PREFIX,) - ) - api.add_resource( - RestConfDispatch, - RESTCONF_PREFIX + '/data', - RESTCONF_PREFIX + '/data/', - RESTCONF_PREFIX + '/data/', - resource_class_args=(yang_handler,) - ) - - LOGGER.info('Available RESTCONF paths:') - for restconf_path in restconf_paths: - LOGGER.info('- {:s}'.format(str(restconf_path))) - - app.run(host=BIND_ADDRESS, port=BIND_PORT) +BIND_ADDRESS = '0.0.0.0' +BIND_PORT = 8080 if __name__ == '__main__': - main() + # Only used to run it locally during development stage; + # otherwise, app is directly launched by gunicorn. + app.run( + host=BIND_ADDRESS, port=BIND_PORT, debug=True, use_reloader=False + ) diff --git a/src/tests/tools/simap_server/simap_server/app.py b/src/tests/tools/simap_server/simap_server/app.py new file mode 100644 index 000000000..3e9f815e5 --- /dev/null +++ b/src/tests/tools/simap_server/simap_server/app.py @@ -0,0 +1,72 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json, logging, secrets +from flask import Flask +from flask_restful import Api +from .Dispatch import RestConfDispatch +from .HostMeta import HostMeta +from .YangHandler import YangHandler + + +logging.basicConfig( + level=logging.INFO, + format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", +) +LOGGER = logging.getLogger(__name__) + + +RESTCONF_PREFIX = '/restconf' +SECRET_KEY = secrets.token_hex(64) + + +YANG_SEARCH_PATH = './yang' +YANG_MODULE_NAMES = [ + 'ietf-inet-types', + 'simap-telemetry', + 'ietf-network-topology', + 'ietf-network', +] + +STARTUP_FILE = './startup.json' +with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: + YANG_STARTUP_DATA = json.loads(fp.read()) + + +yang_handler = YangHandler( + YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA +) +restconf_paths = yang_handler.get_module_paths() + +app = Flask(__name__) +app.config['SECRET_KEY'] = SECRET_KEY + +api = Api(app) +api.add_resource( + HostMeta, + '/.well-known/host-meta', + resource_class_args=(RESTCONF_PREFIX,) +) +api.add_resource( + RestConfDispatch, + RESTCONF_PREFIX + '/data', + RESTCONF_PREFIX + '/data/', + RESTCONF_PREFIX + '/data/', + resource_class_args=(yang_handler,) +) + +LOGGER.info('Available RESTCONF paths:') +for restconf_path in restconf_paths: + LOGGER.info('- {:s}'.format(str(restconf_path))) diff --git a/src/tests/tools/simap_server/tests.sh b/src/tests/tools/simap_server/tests.sh deleted file mode 100755 index de0eeef50..000000000 --- a/src/tests/tools/simap_server/tests.sh +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -echo "Read data on startup..." -curl http://127.0.0.1:8080/restconf/data/ietf-network-topology - - -curl http://127.0.0.1:8080/restconf/data/simap-telemetry - -echo -echo - -echo "Initializing networks..." -curl -X POST -d '{"networks":{"network":[]}}' http://127.0.0.1:8080/restconf/data/simap-telemetry: -# or -curl -X POST -d '{"networks":{}}' http://127.0.0.1:8080/restconf/data/ietf-network-topology: -curl -X PUT -d '{"networks":{"network":[]}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks -echo -echo - -echo "Adding a network..." -curl -X POST -d '{"network":[{"network-id":"simap-1", "node": []}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network -echo - -echo "Adding a node..." 
-curl -X POST -d '{"node":[{"node-id":"r1", "termination-point":[]}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node -echo - -curl -X POST -d '{"termination-point":[{"tp-id":"201"}]}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network=simap-1/node=r1/termination-point - - -curl -X POST -d '{"ietf-network:network": {"network-id":"simap-1"}}' http://127.0.0.1:8080/restconf/data/ietf-network:networks/network - - -curl -X POST -d '{"node":[{"node-id":"r2", "simap:simap-telemetry":{}}]}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node - -curl -X POST -d '{"simap-telemetry":{"cpu-utilization": 98.3}}' http://127.0.0.1:8080/restconf/data/simap-telemetry:networks/network=simap-1/node=r1/simap-telemetry - - - -echo "Read data after update 1..." -curl http://127.0.0.1:8080/restconf/data/ietf-network-topology -echo -echo - -echo "Updating location (from path)..." -curl -X PATCH -d '{"qkdn_location_id":"new-loc-2"}' http://127.0.0.1:8080/restconf/data/ietf-network-topology:qkd_node -echo - -echo "Read final value..." -curl http://127.0.0.1:8080/restconf/data/ietf-network-topology -echo -echo diff --git a/src/tests/tools/simap_server/simap_server/yang/ietf-inet-types.yang b/src/tests/tools/simap_server/yang/ietf-inet-types.yang similarity index 100% rename from src/tests/tools/simap_server/simap_server/yang/ietf-inet-types.yang rename to src/tests/tools/simap_server/yang/ietf-inet-types.yang diff --git a/src/tests/tools/simap_server/simap_server/yang/ietf-network-topology.yang b/src/tests/tools/simap_server/yang/ietf-network-topology.yang similarity index 100% rename from src/tests/tools/simap_server/simap_server/yang/ietf-network-topology.yang rename to src/tests/tools/simap_server/yang/ietf-network-topology.yang diff --git a/src/tests/tools/simap_server/simap_server/yang/ietf-network.yang b/src/tests/tools/simap_server/yang/ietf-network.yang similarity index 100% rename from src/tests/tools/simap_server/simap_server/yang/ietf-network.yang rename to src/tests/tools/simap_server/yang/ietf-network.yang diff --git a/src/tests/tools/simap_server/simap_server/yang/simap-telemetry.yang b/src/tests/tools/simap_server/yang/simap-telemetry.yang similarity index 100% rename from src/tests/tools/simap_server/simap_server/yang/simap-telemetry.yang rename to src/tests/tools/simap_server/yang/simap-telemetry.yang diff --git a/src/tests/tools/simap_server/simap_server/yang/simap.txt b/src/tests/tools/simap_server/yang/simap.txt similarity index 100% rename from src/tests/tools/simap_server/simap_server/yang/simap.txt rename to src/tests/tools/simap_server/yang/simap.txt -- GitLab From e52cacb5223b31312ceee5979286e5356486e631 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 15:42:23 +0000 Subject: [PATCH 028/367] Code cleanup --- manifests/{simapservice.yaml => simap_connectorservice.yaml} | 0 src/slice/service/slice_grouper/MetricsExporter.py | 4 ++-- src/tests/tools/mock_nce_ctrl/Dockerfile | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) rename manifests/{simapservice.yaml => simap_connectorservice.yaml} (100%) diff --git a/manifests/simapservice.yaml b/manifests/simap_connectorservice.yaml similarity index 100% rename from manifests/simapservice.yaml rename to manifests/simap_connectorservice.yaml diff --git a/src/slice/service/slice_grouper/MetricsExporter.py b/src/slice/service/slice_grouper/MetricsExporter.py index 593d2edf6..d22c595d0 100644 --- a/src/slice/service/slice_grouper/MetricsExporter.py +++ 
b/src/slice/service/slice_grouper/MetricsExporter.py @@ -29,8 +29,8 @@ MSG_REST_FAILED = '[rest_request] Query({:s}) failed, retry={:d}/{:d}...' MSG_ERROR_MAX_RETRIES = 'Maximum number of retries achieved: {:d}' METRICSDB_HOSTNAME = os.environ.get('METRICSDB_HOSTNAME') -METRICSDB_ILP_PORT = int(os.environ.get('METRICSDB_ILP_PORT', 0)) -METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT', 0)) +METRICSDB_ILP_PORT = int(os.environ.get('METRICSDB_ILP_PORT', 0) or 0) +METRICSDB_REST_PORT = int(os.environ.get('METRICSDB_REST_PORT', 0) or 0) METRICSDB_TABLE_SLICE_GROUPS = os.environ.get('METRICSDB_TABLE_SLICE_GROUPS') COLORS = { diff --git a/src/tests/tools/mock_nce_ctrl/Dockerfile b/src/tests/tools/mock_nce_ctrl/Dockerfile index ae9dde4eb..579bab536 100644 --- a/src/tests/tools/mock_nce_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_ctrl/Dockerfile @@ -31,7 +31,5 @@ COPY . . RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt -RUN python3 -m pip list - # Start the service ENTRYPOINT ["python", "MockNCECtrl.py"] -- GitLab From 8617abfb871823dd39f62bcb2026d766c7b40314 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 16:46:08 +0000 Subject: [PATCH 029/367] SIMAP Connector: - Added show logs script - Corrected manifest file - Added requirements.in - Added Update methods in SimapClient - Added ENDPOINT to ObjectCache - Improved object key and instance handling in ObjectCache - Implemented Create/Update/Remove events for Topology/Device/Link in SimapUpdater --- manifests/simap_connectorservice.yaml | 10 +- scripts/show_logs_simap_connector.sh | 27 +++ src/simap_connector/requirements.in | 1 + .../service/simap_updater/ObjectCache.py | 99 +++++++-- .../service/simap_updater/SimapUpdater.py | 208 ++++++++++++++---- .../service/simap_updater/Tools.py | 84 ++++++- .../simap_updater/simap_client/SimapClient.py | 51 +++++ 7 files changed, 407 insertions(+), 73 deletions(-) create mode 100755 scripts/show_logs_simap_connector.sh diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 550805d8d..4d00f5122 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -40,9 +40,15 @@ spec: - name: SIMAP_SERVER_SCHEME value: "http" - name: SIMAP_SERVER_ADDRESS - value: "10.254.0.9" + # Assuming SIMAP Server is deployed in a local Docker container, as per: + # - ./src/tests/tools/simap_server/build.sh + # - ./src/tests/tools/simap_server/deploy.sh + value: "172.17.0.1" - name: SIMAP_SERVER_PORT - value: "80" + # Assuming SIMAP Server is deployed in a local Docker container, as per: + # - ./src/tests/tools/simap_server/build.sh + # - ./src/tests/tools/simap_server/deploy.sh + value: "8080" - name: SIMAP_SERVER_USERNAME value: "admin" - name: SIMAP_SERVER_PASSWORD diff --git a/scripts/show_logs_simap_connector.sh b/scripts/show_logs_simap_connector.sh new file mode 100755 index 000000000..20e5a5d3e --- /dev/null +++ b/scripts/show_logs_simap_connector.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/simap-connectorservice -c server diff --git a/src/simap_connector/requirements.in b/src/simap_connector/requirements.in index 3ccc21c7d..5c92783a2 100644 --- a/src/simap_connector/requirements.in +++ b/src/simap_connector/requirements.in @@ -12,3 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. +requests==2.27.* diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index c8802f8c9..4b5cf8244 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -28,47 +28,100 @@ LOGGER = logging.getLogger(__name__) class CachedEntities(Enum): TOPOLOGY = 'topology' DEVICE = 'device' + ENDPOINT = 'endpoint' LINK = 'link' +KEY_LENGTHS = { + CachedEntities.TOPOLOGY : 1, + CachedEntities.DEVICE : 1, + CachedEntities.ENDPOINT : 2, + CachedEntities.LINK : 1, +} + + +def compose_object_key(entity : CachedEntities, *object_uuids : str) -> Tuple[str, ...]: + expected_length = KEY_LENGTHS.get(entity) + entity_name = str(entity.value) + if expected_length is None: + MSG = 'Unsupported ({:s}, {:s})' + raise Exception(MSG.format(entity_name.title(), str(object_uuids))) + + if len(object_uuids) == expected_length: + return (entity_name, *object_uuids) + + MSG = 'Invalid Key ({:s}, {:s})' + raise Exception(MSG.format(entity_name.title(), str(object_uuids))) + + class ObjectCache: def __init__(self, context_client : ContextClient): self._context_client = context_client self._object_cache : Dict[Tuple[str, str], Any] = dict() - def get(self, entity : CachedEntities, object_uuid : str) -> Optional[Any]: - object_key = (entity.value, object_uuid) + def get( + self, entity : CachedEntities, *object_uuids : str, auto_retrieve : bool = True + ) -> Optional[Any]: + object_key = compose_object_key(entity, *object_uuids) if object_key in self._object_cache: return self._object_cache[object_key] - return self._update(entity, object_uuid) - def _retrieve( - self, entity : CachedEntities, entity_uuid : str - ) -> Optional[Any]: + if not auto_retrieve: return None + return self._update(entity, *object_uuids) + + def set(self, entity : CachedEntities, object_inst : Any, *object_uuids : str) -> None: + object_key = compose_object_key(entity, *object_uuids) + 
self._object_cache[object_key] = object_inst + + def _update(self, entity : CachedEntities, *object_uuids : str) -> Optional[Any]: if entity == CachedEntities.TOPOLOGY: - return get_topology(self._context_client, entity_uuid, rw_copy=False) - if entity == CachedEntities.DEVICE: - return get_device( - self._context_client, entity_uuid, rw_copy=False, include_endpoints=True, + object_inst = get_topology( + self._context_client, object_uuids[0], rw_copy=False + ) + elif entity == CachedEntities.DEVICE: + object_inst = get_device( + self._context_client, object_uuids[0], rw_copy=False, include_endpoints=True, include_components=False, include_config_rules=False, ) - if entity == CachedEntities.LINK: - return get_link(self._context_client, entity_uuid, rw_copy=False) - MSG = 'Not Supported ({:s}, {:s})' - LOGGER.warning(MSG.format(str(entity.value).title(), str(entity_uuid))) - return None - - def _update(self, entity : CachedEntities, object_uuid : str) -> Optional[Any]: - object_inst = self._retrieve(entity, object_uuid) + elif entity == CachedEntities.ENDPOINT: + # Endpoints are only updated when updating a Device + return None + elif entity == CachedEntities.LINK: + object_inst = get_link( + self._context_client, object_uuids[0], rw_copy=False + ) + else: + MSG = 'Not Supported ({:s}, {:s})' + LOGGER.warning(MSG.format(str(entity.value).title(), str(object_uuids))) + return None + if object_inst is None: MSG = 'Not Found ({:s}, {:s})' - LOGGER.warning(MSG.format(str(entity).title(), str(object_uuid))) + LOGGER.warning(MSG.format(str(entity.value).title(), str(object_uuids))) return None - object_key = (entity.value, object_uuid) - self._object_cache[object_key] = object_inst + self.set(entity, object_inst, object_uuids[0]) + self.set(entity, object_inst, object_inst.name) + + if entity == CachedEntities.DEVICE: + device_uuid = object_inst.device_id.device_uuid.uuid + device_name = object_inst.name + + for endpoint in object_inst.device_endpoints: + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if device_uuid != endpoint_device_uuid: + MSG = 'DeviceUUID({:s}) != Endpoint.DeviceUUID({:s})' + raise Exception(str(device_uuid), str(endpoint_device_uuid)) + + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_name = endpoint.name + self.set(CachedEntities.ENDPOINT, endpoint, device_uuid, endpoint_uuid) + self.set(CachedEntities.ENDPOINT, endpoint, device_uuid, endpoint_name) + self.set(CachedEntities.ENDPOINT, endpoint, device_name, endpoint_uuid) + self.set(CachedEntities.ENDPOINT, endpoint, device_name, endpoint_name) + return object_inst - def delete(self, entity : CachedEntities, object_uuid : str) -> None: - object_key = (entity.value, object_uuid) + def delete(self, entity : CachedEntities, *object_uuids : str) -> None: + object_key = compose_object_key(entity, *object_uuids) self._object_cache.pop(object_key, None) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 788e29d2e..50330c7ce 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -15,24 +15,19 @@ import logging, queue, threading from typing import Any, Optional -from common.proto.context_pb2 import DeviceEvent, Empty, TopologyEvent -from common.tools.context_queries.Device import get_device -from common.tools.context_queries.Link import get_link - +from common.proto.context_pb2 import DeviceEvent, Empty, LinkEvent, 
TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string -from common.DeviceTypes import DeviceTypeEnum from context.client.ContextClient import ContextClient -from simap_connector.service.simap_updater.ObjectCache import CachedEntities -from .simap_client.RestConfClient import RestConfClient -from .simap_client.SimapClient import SimapClient -from .ObjectCache import ObjectCache - from simap_connector.Config import ( SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) +from .simap_client.RestConfClient import RestConfClient +from .simap_client.SimapClient import SimapClient +from .ObjectCache import CachedEntities, ObjectCache +from .Tools import get_device_endpoint, get_link_endpoint LOGGER = logging.getLogger(__name__) @@ -55,6 +50,12 @@ class EventDispatcher(BaseEventDispatcher): ) self._simap_client = SimapClient(self._restconf_client) + + def dispatch(self, event : Any) -> None: + MSG = 'Unexpected Event: {:s}' + LOGGER.warning(MSG.format(grpc_message_to_json_string(event))) + + def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -67,6 +68,7 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Topology Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) + def dispatch_topology_update(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -74,11 +76,12 @@ class EventDispatcher(BaseEventDispatcher): topology_uuid = topology_event.topology_id.topology_uuid.uuid topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name - self._simap_client.network(topology_name).create() + self._simap_client.network(topology_name).update() MSG = 'Topology Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) + def dispatch_topology_remove(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -89,52 +92,167 @@ class EventDispatcher(BaseEventDispatcher): self._simap_client.network(topology_name).delete() self._object_cache.delete(CachedEntities.TOPOLOGY, topology_uuid) + self._object_cache.delete(CachedEntities.TOPOLOGY, topology_name) MSG = 'Topology Remove: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) + def dispatch_device_create(self, device_event : DeviceEvent) -> None: + MSG = 'Processing Device Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + device_uuid = device_event.device_id.device_uuid.uuid + device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) + device_name = device.name + topology_uuid, endpoint_names = get_device_endpoint(device) + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) + topology_name = topology.name + te_topo = self._simap_client.network(topology_name) + te_topo.update() + te_topo.node(device_name).create(termination_point_ids=endpoint_names) + MSG = 'Device Created: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - #def dispatch_device_create(self, device_event : DeviceEvent) -> None: - # MSG 
= 'Processing Device Create: {:s}' - # LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - topology_uuid = topology_event.topology_id.topology_uuid.uuid - topology = get_topology( - self._context_client, topology_uuid, rw_copy=False, - include_endpoints=False, include_config_rules=True, - include_components=False - ) - device_type = device.device_type - - # tfs_ctrl_settings = get_tfs_controller_settings( - # self._context_client, device_event - # ) - # if tfs_ctrl_settings is None: return - # self._subscriptions.add_subscription(tfs_ctrl_settings) - - #def dispatch_device_update(self, device_event : DeviceEvent) -> None: - # MSG = 'Processing Device Update: {:s}' - # LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - # tfs_ctrl_settings = get_tfs_controller_settings( - # self._context_client, device_event - # ) - # if tfs_ctrl_settings is None: return - # self._subscriptions.add_subscription(tfs_ctrl_settings) - - #def dispatch_device_remove(self, device_event : DeviceEvent) -> None: - # MSG = 'Processing Device Remove: {:s}' - # LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - # device_uuid = device_event.device_id.device_uuid.uuid - # self._subscriptions.remove_subscription(device_uuid) - def dispatch(self, event : Any) -> None: - MSG = 'Unexpected Event: {:s}' - LOGGER.warning(MSG.format(grpc_message_to_json_string(event))) + def dispatch_device_update(self, device_event : DeviceEvent) -> None: + MSG = 'Processing Device Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + + device_uuid = device_event.device_id.device_uuid.uuid + device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) + device_name = device.name + + topology_uuid, endpoint_names = get_device_endpoint(device) + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) + topology_name = topology.name + + te_topo = self._simap_client.network(topology_name) + te_topo.update() + + te_device = te_topo.node(device_name) + te_device.update() + + for endpoint_name in endpoint_names: + te_device.termination_point(endpoint_name).update() + + MSG = 'Device Updated: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + + + def dispatch_device_remove(self, device_event : DeviceEvent) -> None: + MSG = 'Processing Device Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + + device_uuid = device_event.device_id.device_uuid.uuid + device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) + device_name = device.name + + topology_uuid, endpoint_names = get_device_endpoint(device) + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) + topology_name = topology.name + + te_topo = self._simap_client.network(topology_name) + te_topo.update() + + te_device = te_topo.node(device_name) + for endpoint_name in endpoint_names: + te_device.termination_point(endpoint_name).delete() + + endpoint = self._object_cache.get(CachedEntities.ENDPOINT, device_uuid, endpoint_name) + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + self._object_cache.delete(CachedEntities.DEVICE, device_uuid, endpoint_uuid) + self._object_cache.delete(CachedEntities.DEVICE, device_uuid, endpoint_name) + self._object_cache.delete(CachedEntities.DEVICE, device_name, endpoint_uuid) + self._object_cache.delete(CachedEntities.DEVICE, device_name, endpoint_name) + + te_device.delete() + + self._object_cache.delete(CachedEntities.DEVICE, device_uuid) + 
self._object_cache.delete(CachedEntities.DEVICE, device_name) + + MSG = 'Device Remove: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + + + def dispatch_link_create(self, link_event : LinkEvent) -> None: + MSG = 'Processing Link Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + + link_uuid = link_event.link_id.link_uuid.uuid + link = self._object_cache.get(CachedEntities.LINK, link_uuid) + link_name = link.name + + topology_uuid, endpoint_uuids = get_link_endpoint(link) + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) + topology_name = topology.name + + te_topo = self._simap_client.network(topology_name) + te_topo.update() + + src_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + te_topo.link(link_name).create(src_device_name, src_endpoint_name, dst_device_name, dst_endpoint_name) + + MSG = 'Link Created: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + + def dispatch_link_update(self, link_event : LinkEvent) -> None: + MSG = 'Processing Link Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + + link_uuid = link_event.link_id.link_uuid.uuid + link = self._object_cache.get(CachedEntities.LINK, link_uuid) + link_name = link.name + + topology_uuid, endpoint_uuids = get_link_endpoint(link) + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) + topology_name = topology.name + + te_topo = self._simap_client.network(topology_name) + te_topo.update() + + src_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + te_link = te_topo.link(link_name) + te_link.update(src_device_name, src_endpoint_name, dst_device_name, dst_endpoint_name) + + MSG = 'Link Updated: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + + def dispatch_link_remove(self, link_event : LinkEvent) -> None: + MSG = 'Processing Link Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + + link_uuid = link_event.link_id.link_uuid.uuid + link = self._object_cache.get(CachedEntities.LINK, link_uuid) + link_name = link.name + + topology_uuid, _ = get_link_endpoint(link) + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) + topology_name = topology.name + + te_topo = self._simap_client.network(topology_name) + te_topo.update() + + te_link = te_topo.link(link_name) + te_link.delete() + + self._object_cache.delete(CachedEntities.LINK, link_uuid) + self._object_cache.delete(CachedEntities.LINK, link_name) + + MSG = 'Link Remove: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) class SimapUpdater: diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index 
4ec1c83bc..52587af56 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -14,10 +14,10 @@ import enum -from typing import Union +from typing import Dict, List, Set, Tuple, Union from common.proto.context_pb2 import ( - EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, - DeviceEvent, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent + EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, + DeviceEvent, Link, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent ) from common.tools.grpc.Tools import grpc_message_to_json_string @@ -38,3 +38,81 @@ def get_event_type(event : EVENT_TYPE) -> EventTypeEnum: MSG = 'Unsupported EventType({:s}) in Event({:s})' str_event = grpc_message_to_json_string(event) raise Exception(MSG.format(str(int_event_type), str_event)) + + +def get_device_endpoint(device : Device) -> Tuple[str, List[str]]: + topology_uuids : Set[str] = set() + endpoint_device_uuids : Set[str] = set() + endpoint_names : List[str] = list() + + for endpoint in device.device_endpoints: + topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + topology_uuids.add(topology_uuid) + + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + endpoint_device_uuids.add(endpoint_device_uuid) + + endpoint_name = endpoint.name + endpoint_names.append(endpoint_name) + + try: + # Check topology UUIDs + if len(topology_uuids) != 1: + MSG = 'Unsupported: no/multiple Topologies({:s}) referenced' + raise Exception(MSG.format(str(topology_uuids))) + topology_uuid = list(topology_uuids)[0] + if len(topology_uuid) == 0: + MSG = 'Unsupported: empty TopologyUUID({:s}) referenced' + raise Exception(MSG.format(str(topology_uuid))) + + # Check Device UUIDs + if len(endpoint_device_uuids) != 1: + MSG = 'Unsupported: no/multiple DeviceUUID({:s}) referenced' + raise Exception(MSG.format(str(endpoint_device_uuids))) + endpoint_device_uuid = list(endpoint_device_uuids)[0] + if len(endpoint_device_uuid) == 0: + MSG = 'Unsupported: empty Endpoint.DeviceUUID({:s}) referenced' + raise Exception(MSG.format(str(endpoint_device_uuid))) + + device_uuid = device.device_id.device_uuid.uuid + if endpoint_device_uuid != device_uuid: + MSG = 'Unsupported: Endpoint.DeviceUUID({:s}) != DeviceUUID({:s})' + raise Exception(MSG.format(str(endpoint_device_uuid), str(device_uuid))) + except Exception as e: + MSG = '{:s} in Device({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(device))) from e + + return topology_uuid, endpoint_names + + +def get_link_endpoint(link : Link) -> Tuple[str, List[Tuple[str, str]]]: + topology_uuids : Set[str] = set() + endpoint_uuids : List[Tuple[str, str]] = list() + + for endpoint_id in link.link_endpoint_ids: + topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + topology_uuids.add(topology_uuid) + + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_uuids.append((device_uuid, endpoint_uuid)) + + try: + # Check topology UUIDs + if len(topology_uuids) != 1: + MSG = 'Unsupported: no/multiple Topologies({:s}) referenced' + raise Exception(MSG.format(str(topology_uuids))) + topology_uuid = list(topology_uuids)[0] + if len(topology_uuid) == 0: + MSG = 'Unsupported: empty TopologyUUID({:s}) referenced' + raise Exception(MSG.format(str(topology_uuid))) + + # Check Count Endpoints + if len(endpoint_uuids) != 2: + MSG = 'Unsupported: non-p2p link LinkUUIDs({:s})' + raise Exception(MSG.format(str(endpoint_uuids))) + 
except Exception as e: + MSG = '{:s} in Link({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(link))) from e + + return topology_uuid, endpoint_uuids diff --git a/src/simap_connector/service/simap_updater/simap_client/SimapClient.py b/src/simap_connector/service/simap_updater/simap_client/SimapClient.py index 26713ac5e..b4c27d43a 100644 --- a/src/simap_connector/service/simap_updater/simap_client/SimapClient.py +++ b/src/simap_connector/service/simap_updater/simap_client/SimapClient.py @@ -45,6 +45,19 @@ class TerminationPoint: node : Dict = self._restconf_client.get(endpoint) return node['ietf-network-topology:termination-point'][0] + def update(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) @@ -88,6 +101,20 @@ class Node: node : Dict = self._restconf_client.get(endpoint) return node['ietf-network:node'][0] + def update( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) @@ -122,6 +149,22 @@ class Link: link : Dict = self._restconf_client.get(endpoint) return link['ietf-network-topology:link'][0] + def update( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) self._restconf_client.delete(endpoint) @@ -170,6 +213,14 @@ class Network: networks : Dict = 
self._restconf_client.get(endpoint) return networks['ietf-network:network'][0] + def update(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = Network.ENDPOINT_ID.format(self._network_id) self._restconf_client.delete(endpoint) -- GitLab From 9d772ce509c332fa8d2289baf8e6da8ba92e8410 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 16:47:06 +0000 Subject: [PATCH 030/367] Tests - Tools - SIMAP Server - Added Update methods in SimapClient - Code cleanup --- .../simap_server/simap_client/SimapClient.py | 51 +++++++++++++++++++ .../tools/simap_server/simap_client/tests.py | 50 ------------------ 2 files changed, 51 insertions(+), 50 deletions(-) delete mode 100644 src/tests/tools/simap_server/simap_client/tests.py diff --git a/src/tests/tools/simap_server/simap_client/SimapClient.py b/src/tests/tools/simap_server/simap_client/SimapClient.py index 26713ac5e..b4c27d43a 100644 --- a/src/tests/tools/simap_server/simap_client/SimapClient.py +++ b/src/tests/tools/simap_server/simap_client/SimapClient.py @@ -45,6 +45,19 @@ class TerminationPoint: node : Dict = self._restconf_client.get(endpoint) return node['ietf-network-topology:termination-point'][0] + def update(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) @@ -88,6 +101,20 @@ class Node: node : Dict = self._restconf_client.get(endpoint) return node['ietf-network:node'][0] + def update( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) @@ -122,6 +149,22 @@ class Link: link : Dict = self._restconf_client.get(endpoint) return link['ietf-network-topology:link'][0] + def update( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, 
dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) self._restconf_client.delete(endpoint) @@ -170,6 +213,14 @@ class Network: networks : Dict = self._restconf_client.get(endpoint) return networks['ietf-network:network'][0] + def update(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + def delete(self) -> None: endpoint = Network.ENDPOINT_ID.format(self._network_id) self._restconf_client.delete(endpoint) diff --git a/src/tests/tools/simap_server/simap_client/tests.py b/src/tests/tools/simap_server/simap_client/tests.py deleted file mode 100644 index d12770513..000000000 --- a/src/tests/tools/simap_server/simap_client/tests.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging -from .RestConfClient import RestConfClient -from .SimapClient import SimapClient - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - -def main() -> None: - restconf_client = RestConfClient('127.0.0.1', logger=LOGGER) - simap_client = SimapClient(restconf_client) - - simap_client.network('topology').create() - - simap_client.network('topology').node('r1').create() - simap_client.network('topology').node('r1').termination_point('100').create() - simap_client.network('topology').node('r1').termination_point('101').create() - simap_client.network('topology').node('r1').termination_point('102').create() - - simap_client.network('topology').node('r2').create(tp_ids=['200', '201', '202']) - - simap_client.network('topology').link('l1').create('r1', '102', 'r2', '201') - - print('networks=', simap_client.networks()) - print('network[topology].nodes=', simap_client.network('topology').nodes()) - print('network[topology].node[r2]=', simap_client.network('topology').node('r2').get()) - print('network[topology].links=', simap_client.network('topology').links()) - print('network[topology].link[l1]=', simap_client.network('topology').link('l1').get()) - - simap_client.network('topology').link('l1').delete() - simap_client.network('topology').node('r2').delete() - simap_client.network('topology').node('r1').delete() - print('networks=', simap_client.networks()) - -if __name__ == '__main__': - main() -- GitLab From 7e9a189b20338e68daf55a375666216302129c34 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 16:48:30 +0000 Subject: [PATCH 031/367] ECOC F5GA Telemetry Demo: - Corrected deploy specs - Corrected topology descriptors - Added/Updated redeploy scripts --- .../data/topology-agg.json | 4 +- .../data/topology-e2e.json | 8 +-- .../data/topology-ip.json | 56 +++++++++---------- .../ecoc25-f5ga-telemetry/deploy-specs-agg.sh | 3 + .../ecoc25-f5ga-telemetry/deploy-specs-e2e.sh | 3 + .../ecoc25-f5ga-telemetry/deploy-specs-ip.sh | 3 + .../redeploy-simap-server.sh | 19 +++++++ .../ecoc25-f5ga-telemetry/redeploy-tfs.sh | 15 +++-- 8 files changed, 73 insertions(+), 38 deletions(-) create mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index 6076408f9..ded54af19 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -25,7 +25,7 @@ }}} ]}}, {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "name": "172.16.58.10", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ @@ -35,7 +35,7 @@ ]}}} ]}}, {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, 
{"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json index 9f3eeee18..872189091 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -26,7 +26,7 @@ ]}}, {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ @@ -37,7 +37,7 @@ ]} }, {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ @@ -48,7 +48,7 @@ ]} }, {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ @@ -59,7 +59,7 @@ ]} }, {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], "device_config": {"config_rules": [ + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json index 28758674d..f4258d52a 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json @@ -8,7 +8,7 @@ "devices": [ { "device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "name": "172.16.125.25", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": 
"_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, @@ -21,7 +21,7 @@ }, { "device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "name": "172.16.125.31", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, @@ -33,7 +33,7 @@ }, { "device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "name": "172.16.125.33", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, @@ -45,7 +45,7 @@ }, { "device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "name": "172.16.125.32", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNKNOWN"], + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, @@ -59,70 +59,70 @@ ], "links": [ { - "link_id": {"link_uuid": {"uuid": "172.16.122.25-500"}}, "name": "172.16.122.25-500", + "link_id": {"link_uuid": {"uuid": "172.16.125.25-500"}}, "name": "172.16.125.25-500", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "500"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.31-500"}}, "name": "172.16.122.31-500", + "link_id": {"link_uuid": {"uuid": "172.16.125.31-500"}}, "name": "172.16.125.31-500", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "500"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.25-501"}}, "name": "172.16.122.25-501", + "link_id": {"link_uuid": {"uuid": "172.16.125.25-501"}}, "name": "172.16.125.25-501", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, 
"endpoint_uuid": {"uuid": "500"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.33-500"}}, "name": "172.16.122.33-500", + "link_id": {"link_uuid": {"uuid": "172.16.125.33-500"}}, "name": "172.16.125.33-500", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.25"}}, "endpoint_uuid": {"uuid": "501"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "501"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.31-501"}}, "name": "172.16.122.31-501", + "link_id": {"link_uuid": {"uuid": "172.16.125.31-501"}}, "name": "172.16.125.31-501", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "500"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.32-500"}}, "name": "172.16.122.32-500", + "link_id": {"link_uuid": {"uuid": "172.16.125.32-500"}}, "name": "172.16.125.32-500", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.31"}}, "endpoint_uuid": {"uuid": "501"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "501"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.32-501"}}, "name": "172.16.122.32-501", + "link_id": {"link_uuid": {"uuid": "172.16.125.32-501"}}, "name": "172.16.125.32-501", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "501"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "501"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.122.33-501"}}, "name": "172.16.122.33-501", + "link_id": {"link_uuid": {"uuid": "172.16.125.33-501"}}, "name": "172.16.125.33-501", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.122.33"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.122.32"}}, "endpoint_uuid": {"uuid": "501"}} + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "501"}} ] } ] diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh index 0820e21b7..c7b5e98b5 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh +++ 
b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh @@ -86,6 +86,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" #fi +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + # Uncomment to activate Load Generator #export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh index 0820e21b7..c7b5e98b5 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh @@ -86,6 +86,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" #fi +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + # Uncomment to activate Load Generator #export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh index 0820e21b7..c7b5e98b5 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh @@ -86,6 +86,9 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" # export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" #fi +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + # Uncomment to activate Load Generator #export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh new file mode 100755 index 000000000..e50fc0418 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +~/tfs-ctrl/src/tests/tools/simap_server/build.sh +~/tfs-ctrl/src/tests/tools/simap_server/deploy.sh + +echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh index 9e9416b78..6e4961af0 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh @@ -13,9 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# assuming the instances are named as tfs-e2e-ctrl, tfs-agg-ctrl, and tfs-ip-ctrl -CTRL_NAME=$(hostname | cut -d'-' -f2) -echo "Deploying: ${CTRL_NAME}" +# Assuming the instances are named as tfs-e2e-ctrl, tfs-agg-ctrl, and tfs-ip-ctrl +#CTRL_NAME=$(hostname | cut -d'-' -f2) +#echo "Deploying: ${CTRL_NAME}" -source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-${CTRL_NAME}.sh +#source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-${CTRL_NAME}.sh +source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh ./deploy/all.sh + +echo "Waiting for NATS connection..." +while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + +echo "Ready!" -- GitLab From 6ade08f39ab58e5cfb52f57942369c16a805925f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 17:12:19 +0000 Subject: [PATCH 032/367] SIMAP Connector: - Corrected retrieval of Link device/endpoint names in SimapUpdater --- .../service/simap_updater/SimapUpdater.py | 56 +++++++++++++++---- 1 file changed, 45 insertions(+), 11 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 50330c7ce..621feb756 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -194,12 +194,29 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() - src_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) - src_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) - dst_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) - dst_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) - - te_topo.link(link_name).create(src_device_name, src_endpoint_name, dst_device_name, dst_endpoint_name) + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + try: + if src_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0][0]))) + if src_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0]))) + if dst_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1][0]))) + if dst_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1]))) + except Exception as e: + MSG = '{:s} in Link({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(link))) from e + + te_topo.link(link_name).create(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) MSG = 'Link Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -219,13 +236,30 @@ class EventDispatcher(BaseEventDispatcher): te_topo = 
self._simap_client.network(topology_name) te_topo.update() - src_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) - src_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) - dst_device_name = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) - dst_endpoint_name = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + try: + if src_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0][0]))) + if src_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0]))) + if dst_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1][0]))) + if dst_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1]))) + except Exception as e: + MSG = '{:s} in Link({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(link))) from e te_link = te_topo.link(link_name) - te_link.update(src_device_name, src_endpoint_name, dst_device_name, dst_endpoint_name) + te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) MSG = 'Link Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) -- GitLab From 0618ca07c10147071b7d1705889d6e9c85d1627c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 26 Aug 2025 17:18:43 +0000 Subject: [PATCH 033/367] SIMAP Connector: - Code Cleanup, removed SIMAP_DEFAULT_TOPOLOGY - Added SimapConnectorClient skeleton - Removed wrong tests --- manifests/simap_connectorservice.yaml | 2 - src/simap_connector/Config.py | 12 ++--- .../client/SimapConnectorClient.py | 50 +++++++++++++++++++ src/simap_connector/client/__init__.py | 13 +++++ src/simap_connector/tests/__init__.py | 14 ++++++ 5 files changed, 82 insertions(+), 9 deletions(-) create mode 100644 src/simap_connector/client/SimapConnectorClient.py create mode 100644 src/simap_connector/client/__init__.py create mode 100644 src/simap_connector/tests/__init__.py diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 4d00f5122..1049b0dfc 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -53,8 +53,6 @@ spec: value: "admin" - name: SIMAP_SERVER_PASSWORD value: "admin" - - name: SIMAP_DEFAULT_TOPOLOGY - value: "te" startupProbe: grpc: port: 9090 diff --git a/src/simap_connector/Config.py b/src/simap_connector/Config.py index 23958037f..656e9a875 100644 --- a/src/simap_connector/Config.py +++ b/src/simap_connector/Config.py @@ -13,12 +13,10 @@ # limitations under the License. 
-from common.Constants import DEFAULT_TOPOLOGY_NAME from common.Settings import get_setting -SIMAP_SERVER_SCHEME = str(get_setting('SIMAP_SERVER_SCHEME', default='http' )) -SIMAP_SERVER_ADDRESS = str(get_setting('SIMAP_SERVER_ADDRESS', default='127.0.0.1' )) -SIMAP_SERVER_PORT = int(get_setting('SIMAP_SERVER_PORT', default='80' )) -SIMAP_SERVER_USERNAME = str(get_setting('SIMAP_SERVER_USERNAME', default='admin' )) -SIMAP_SERVER_PASSWORD = str(get_setting('SIMAP_SERVER_PASSWORD', default='admin' )) -SIMAP_DEFAULT_TOPOLOGY = str(get_setting('SIMAP_DEFAULT_TOPOLOGY', default=DEFAULT_TOPOLOGY_NAME)) +SIMAP_SERVER_SCHEME = str(get_setting('SIMAP_SERVER_SCHEME', default='http' )) +SIMAP_SERVER_ADDRESS = str(get_setting('SIMAP_SERVER_ADDRESS', default='127.0.0.1')) +SIMAP_SERVER_PORT = int(get_setting('SIMAP_SERVER_PORT', default='80' )) +SIMAP_SERVER_USERNAME = str(get_setting('SIMAP_SERVER_USERNAME', default='admin' )) +SIMAP_SERVER_PASSWORD = str(get_setting('SIMAP_SERVER_PASSWORD', default='admin' )) diff --git a/src/simap_connector/client/SimapConnectorClient.py b/src/simap_connector/client/SimapConnectorClient.py new file mode 100644 index 000000000..299b3bada --- /dev/null +++ b/src/simap_connector/client/SimapConnectorClient.py @@ -0,0 +1,50 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import grpc, logging +from common.Constants import ServiceNameEnum +from common.proto.context_pb2 import Empty +#from common.proto.e2eorchestrator_pb2_grpc import E2EOrchestratorServiceStub +from common.Settings import get_service_host, get_service_port_grpc +from common.tools.client.RetryDecorator import delay_exponential, retry +#from common.tools.grpc.Tools import grpc_message_to_json +#from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest, E2EOrchestratorReply + +LOGGER = logging.getLogger(__name__) +MAX_RETRIES = 15 +DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) +RETRY_DECORATOR = retry( + max_retries=MAX_RETRIES, delay_function=DELAY_FUNCTION, + prepare_method_name='connect', +) + +class SimapConnectorClient: + def __init__(self, host=None, port=None): + if not host: host = get_service_host(ServiceNameEnum.SIMAP_CONNECTOR) + if not port: port = get_service_port_grpc(ServiceNameEnum.SIMAP_CONNECTOR) + self.endpoint = '{:s}:{:s}'.format(str(host), str(port)) + LOGGER.debug('Creating channel to {:s}...'.format(str(self.endpoint))) + self.channel = None + self.stub = None + self.connect() + LOGGER.debug('Channel created') + + def connect(self): + self.channel = grpc.insecure_channel(self.endpoint) + #self.stub = E2EOrchestratorServiceStub(self.channel) + + def close(self): + if self.channel is not None: self.channel.close() + self.channel = None + self.stub = None diff --git a/src/simap_connector/client/__init__.py b/src/simap_connector/client/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/client/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/simap_connector/tests/__init__.py b/src/simap_connector/tests/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/simap_connector/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ -- GitLab From 9c8001fa11a0e17d5a68e176e51684d19f493acd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 09:55:43 +0000 Subject: [PATCH 034/367] ECOC F5GA Telemetry Demo: - Updated manifests and deploy scripts to simplify --- deploy/all.sh | 6 +++--- manifests/simap_connectorservice.yaml | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/deploy/all.sh b/deploy/all.sh index a284287bc..f02f7bbb0 100755 --- a/deploy/all.sh +++ b/deploy/all.sh @@ -230,16 +230,16 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"} ./deploy/nats.sh # Deploy QuestDB -./deploy/qdb.sh +#./deploy/qdb.sh # Deploy Apache Kafka ./deploy/kafka.sh #Deploy Monitoring (Prometheus, Mimir, Grafana) -./deploy/monitoring.sh +#./deploy/monitoring.sh # Expose Dashboard -./deploy/expose_dashboard.sh +#./deploy/expose_dashboard.sh # Deploy TeraFlowSDN ./deploy/tfs.sh diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 1049b0dfc..a1cfe5324 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -43,7 +43,8 @@ spec: # Assuming SIMAP Server is deployed in a local Docker container, as per: # - ./src/tests/tools/simap_server/build.sh # - ./src/tests/tools/simap_server/deploy.sh - value: "172.17.0.1" + #value: "172.17.0.1" + value: "10.254.0.9" - name: SIMAP_SERVER_PORT # Assuming SIMAP Server is deployed in a local Docker container, as per: # - ./src/tests/tools/simap_server/build.sh -- GitLab From 725b4c1983dac20b0ae5e20e886e064ecfa2e922 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 12:16:57 +0000 Subject: [PATCH 035/367] SIMAP Connector: - Skipped events reporting partial objects --- .../service/simap_updater/SimapUpdater.py | 28 +++++++++++++++++++ .../service/simap_updater/Tools.py | 12 ++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 621feb756..1e6cfbe35 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -107,6 +107,13 @@ class EventDispatcher(BaseEventDispatcher): device_name = device.name topology_uuid, endpoint_names = get_device_endpoint(device) + if topology_uuid is None: + MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name @@ -128,6 +135,13 @@ class EventDispatcher(BaseEventDispatcher): device_name = device.name topology_uuid, endpoint_names = get_device_endpoint(device) + if topology_uuid is None: + MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name @@ -188,6 +202,13 @@ class EventDispatcher(BaseEventDispatcher): link_name = link.name topology_uuid, endpoint_uuids = get_link_endpoint(link) + if topology_uuid is None: + MSG = 'LinkEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' + str_link_event = 
grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name @@ -230,6 +251,13 @@ class EventDispatcher(BaseEventDispatcher): link_name = link.name topology_uuid, endpoint_uuids = get_link_endpoint(link) + if topology_uuid is None: + MSG = 'LinkEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index 52587af56..db0430747 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -14,7 +14,7 @@ import enum -from typing import Dict, List, Set, Tuple, Union +from typing import List, Optional, Set, Tuple, Union from common.proto.context_pb2 import ( EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, DeviceEvent, Link, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent @@ -40,11 +40,14 @@ def get_event_type(event : EVENT_TYPE) -> EventTypeEnum: raise Exception(MSG.format(str(int_event_type), str_event)) -def get_device_endpoint(device : Device) -> Tuple[str, List[str]]: +def get_device_endpoint(device : Device) -> Tuple[Optional[str], List[str]]: topology_uuids : Set[str] = set() endpoint_device_uuids : Set[str] = set() endpoint_names : List[str] = list() + if len(device.device_endpoints) == 0: + return None, endpoint_names + for endpoint in device.device_endpoints: topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid topology_uuids.add(topology_uuid) @@ -85,10 +88,13 @@ def get_device_endpoint(device : Device) -> Tuple[str, List[str]]: return topology_uuid, endpoint_names -def get_link_endpoint(link : Link) -> Tuple[str, List[Tuple[str, str]]]: +def get_link_endpoint(link : Link) -> Tuple[Optional[str], List[Tuple[str, str]]]: topology_uuids : Set[str] = set() endpoint_uuids : List[Tuple[str, str]] = list() + if len(link.link_endpoint_ids) == 0: + return None, endpoint_uuids + for endpoint_id in link.link_endpoint_ids: topology_uuid = endpoint_id.topology_id.topology_uuid.uuid topology_uuids.add(topology_uuid) -- GitLab From 71908367e71aabedd8fbfcf0612237996b1ae602 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 12:17:21 +0000 Subject: [PATCH 036/367] ECOC F5GA Telemetry Demo: - Corrected underlay controller IP addresses on topology descriptors --- src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json | 8 ++++---- src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index ded54af19..fd3b99118 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -8,8 +8,8 @@ "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "name": "TFS-IP", "device_type": "teraflowsdn", "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", 
"resource_value": "10.254.6.208"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.12"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false @@ -17,8 +17,8 @@ ]}}, {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "name": "NCE-T", "device_type": "ip-sdn-controller", "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.221"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json index 872189091..74e4950cd 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -8,7 +8,7 @@ "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "name": "TFS-AGG", "device_type": "teraflowsdn", "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", @@ -17,8 +17,8 @@ ]}}, {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "name": "NCE-FAN", "device_type": "ip-sdn-controller", "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.6.221"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false -- GitLab From dafb2eabbbcec445b32656e6714a9961372fd01b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 12:27:34 +0000 Subject: [PATCH 037/367] ECOC F5GA Telemetry Demo: - Corrected 
redeploy support services script --- .../redeploy-simap-server.sh | 19 -------- .../redeploy-support-mocks.sh | 44 +++++++++++++++++++ 2 files changed, 44 insertions(+), 19 deletions(-) delete mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh deleted file mode 100755 index e50fc0418..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-simap-server.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -~/tfs-ctrl/src/tests/tools/simap_server/build.sh -~/tfs-ctrl/src/tests/tools/simap_server/deploy.sh - -echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh new file mode 100755 index 000000000..9a0ee1630 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build SIMAP Server +cd ~/tfs-ctrl/src/tests/tools/simap_server +docker buildx build -t simap-server:mock -f Dockerfile . + +# Build NCE-FAN Controller +cd ~/tfs-ctrl/src/tests/tools/mock_nce_ctrl +docker buildx build -t nce-fan-ctrl:mock -f Dockerfile . + +# Build NCE-T Controller +cd ~/tfs-ctrl/src/tests/tools/mock_ietf_actn_sdn_ctrl +docker buildx build -t nce-t-ctrl:mock -f Dockerfile . + + +# Cleanup +docker rm --force simap-server +docker rm --force nce-fan-ctrl +docker rm --force nce-t-ctrl + +# Create SIMAP Server, NCE-FAN Controller, NCE-T Controller +docker run --detach --name simap-server --publish 8080:8080 simap-server:mock +docker run --detach --name nce-fan-ctrl --publish 8081:8080 nce-fan-ctrl:mock +docker run --detach --name nce-t-ctrl --publish 8082:8080 nce-t-ctrl:mock + +sleep 2 + +# Dump Docker containers +docker ps -a + +echo "Bye!" -- GitLab From 492a0c661f7d40687e0b476ba126155fc1f727a3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 13:09:19 +0000 Subject: [PATCH 038/367] Context component: - Shifted to single ALL topic in Message Broker to ensure proper ordering of delivered messages. 
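With a single ALL topic every event travels through one ordered queue; consumers re-derive the event class from the identifier field carried in each message and drop the kinds they did not subscribe to. A minimal sketch of that consumer-side filtering, assuming the EventTopicEnum values and the MessageBroker.consume() generator already defined in Events.py (variable names are illustrative only):

    # Sketch: read everything from the single 'ALL' topic and keep only the
    # event kinds requested by the caller through topic_enums.
    for message in messagebroker.consume([EventTopicEnum.ALL.value], consume_timeout=CONSUME_TIMEOUT):
        data = json.loads(message.content)
        if 'device_id' in data and EventTopicEnum.DEVICE in topic_enums:
            yield DeviceEvent(**data)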
--- src/context/service/database/Events.py | 41 +++++++++++++++++++++----- 1 file changed, 34 insertions(+), 7 deletions(-) diff --git a/src/context/service/database/Events.py b/src/context/service/database/Events.py index 4667de549..06725807b 100644 --- a/src/context/service/database/Events.py +++ b/src/context/service/database/Events.py @@ -31,6 +31,7 @@ class EventTopicEnum(enum.Enum): SLICE = 'slice' TOPOLOGY = 'topology' OPTICALCONFIG = 'optical-config' + ALL = 'all' TOPIC_TO_EVENTCLASS = { @@ -49,13 +50,18 @@ CONSUME_TIMEOUT = 0.5 # seconds LOGGER = logging.getLogger(__name__) +# NOTE: Forced to use a single "ALL" topic so that we ensure all messages are kept ordered. +# Consumer filters appropriate ones while delivering. +# TODO: Upgrade this schema with proper in-topic filters to enhance performance. + def notify_event( messagebroker : MessageBroker, topic_enum : EventTopicEnum, event_type : EventTypeEnum, fields : Dict[str, str] ) -> None: event = {'event': {'timestamp': {'timestamp': time.time()}, 'event_type': event_type}} for field_name, field_value in fields.items(): event[field_name] = field_value - messagebroker.publish(Message(topic_enum.value, json.dumps(event))) + #messagebroker.publish(Message(topic_enum.value, json.dumps(event))) + messagebroker.publish(Message(EventTopicEnum.ALL.value, json.dumps(event))) def notify_event_context(messagebroker : MessageBroker, event_type : EventTypeEnum, context_id : Dict) -> None: notify_event(messagebroker, EventTopicEnum.CONTEXT, event_type, {'context_id': context_id}) @@ -87,11 +93,32 @@ def notify_event_policy_rule(messagebroker : MessageBroker, event_type : EventTy def consume_events( messagebroker : MessageBroker, topic_enums : Set[EventTopicEnum], consume_timeout : float = CONSUME_TIMEOUT ) -> Iterator: - topic_names = [topic_enum.value for topic_enum in topic_enums] + #topic_names = [topic_enum.value for topic_enum in topic_enums] + topic_names = [EventTopicEnum.ALL.value] for message in messagebroker.consume(topic_names, consume_timeout=consume_timeout): - event_class = TOPIC_TO_EVENTCLASS.get(message.topic) - if event_class is None: - MSG = 'No EventClass defined for Topic({:s}). Ignoring...' - LOGGER.warning(MSG.format(str(message.topic))) + #event_class = TOPIC_TO_EVENTCLASS.get(message.topic) + #if event_class is None: + # MSG = 'No EventClass defined for Topic({:s}). Ignoring...' + # LOGGER.warning(MSG.format(str(message.topic))) + # continue + data = json.loads(message.content) + if 'context_id' in data and EventTopicEnum.CONTEXT in topic_enums: + yield ContextEvent(**data) + elif 'topology_id' in data and EventTopicEnum.TOPOLOGY in topic_enums: + yield TopologyEvent(**data) + elif 'device_id' in data and EventTopicEnum.DEVICE in topic_enums: + yield DeviceEvent(**data) + elif 'opticalconfig_id' in data and EventTopicEnum.OPTICALCONFIG in topic_enums: + yield DeviceEvent(**data) + elif 'link_id' in data and EventTopicEnum.LINK in topic_enums: + yield LinkEvent(**data) + elif 'service_id' in data and EventTopicEnum.SERVICE in topic_enums: + yield ServiceEvent(**data) + elif 'slice_id' in data and EventTopicEnum.SLICE in topic_enums: + yield SliceEvent(**data) + elif 'connection_id' in data and EventTopicEnum.CONNECTION in topic_enums: + yield ConnectionEvent(**data) + else: + MSG = 'Unable to identify EventClass for Message({:s}). Ignoring...' 
+ LOGGER.warning(MSG.format(str(message))) continue - yield event_class(**json.loads(message.content)) -- GitLab From 13d89fec110276bc90abd96b1c1bf2691aaa62ce Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 13:40:20 +0000 Subject: [PATCH 039/367] SIMAP Connector: - Updated SimapUpdater to skip remotely-managed devices and controllers --- .../service/simap_updater/SimapUpdater.py | 96 ++++++++++++++++++- 1 file changed, 92 insertions(+), 4 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 1e6cfbe35..456fd5901 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -15,6 +15,7 @@ import logging, queue, threading from typing import Any, Optional +from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import DeviceEvent, Empty, LinkEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher @@ -63,7 +64,10 @@ class EventDispatcher(BaseEventDispatcher): topology_uuid = topology_event.topology_id.topology_uuid.uuid topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name - self._simap_client.network(topology_name).create() + + # Theoretically it should be create(), but given we have multiple clients + # updating the same SIMAP server, use update() to skip get-check-create-or-update tricks. + self._simap_client.network(topology_name).update() MSG = 'Topology Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -104,7 +108,34 @@ class EventDispatcher(BaseEventDispatcher): device_uuid = device_event.device_id.device_uuid.uuid device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) - device_name = device.name + + device_type = device.device_type + SKIPPED_DEVICE_TYPES = { + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, + DeviceTypeEnum.IP_SDN_CONTROLLER.value, + DeviceTypeEnum.NCE.value, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, + } + if device_type in SKIPPED_DEVICE_TYPES: + MSG = ( + 'DeviceEvent({:s}) skipped, is of a skipped device type. ' + 'SIMAP should be updated by him: {:s}' + ) + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return + + device_controller_uuid = device.controller_id.device_uuid.uuid + if len(device_controller_uuid) > 0: + MSG = ( + 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' + 'SIMAP should be populated by remote controller: {:s}' + ) + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return topology_uuid, endpoint_names = get_device_endpoint(device) if topology_uuid is None: @@ -120,6 +151,7 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() + device_name = device.name te_topo.node(device_name).create(termination_point_ids=endpoint_names) MSG = 'Device Created: {:s}' @@ -132,7 +164,34 @@ class EventDispatcher(BaseEventDispatcher): device_uuid = device_event.device_id.device_uuid.uuid device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) - device_name = device.name + + device_type = device.device_type + SKIPPED_DEVICE_TYPES = { + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, + DeviceTypeEnum.IP_SDN_CONTROLLER.value, + DeviceTypeEnum.NCE.value, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, + } + if device_type in SKIPPED_DEVICE_TYPES: + MSG = ( + 'DeviceEvent({:s}) skipped, is of a skipped device type. ' + 'SIMAP should be updated by him: {:s}' + ) + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return + + device_controller_uuid = device.controller_id.device_uuid.uuid + if len(device_controller_uuid) > 0: + MSG = ( + 'DeviceEvent({:s}) skipped, is a remotely-managed device. ' + 'SIMAP should be updated by remote controller: {:s}' + ) + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return topology_uuid, endpoint_names = get_device_endpoint(device) if topology_uuid is None: @@ -148,6 +207,7 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() + device_name = device.name te_device = te_topo.node(device_name) te_device.update() @@ -164,7 +224,34 @@ class EventDispatcher(BaseEventDispatcher): device_uuid = device_event.device_id.device_uuid.uuid device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) - device_name = device.name + + device_type = device.device_type + SKIPPED_DEVICE_TYPES = { + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, + DeviceTypeEnum.IP_SDN_CONTROLLER.value, + DeviceTypeEnum.NCE.value, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, + } + if device_type in SKIPPED_DEVICE_TYPES: + MSG = ( + 'DeviceEvent({:s}) skipped, is of a skipped device type. ' + 'SIMAP should be updated by him: {:s}' + ) + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return + + device_controller_uuid = device.controller_id.device_uuid.uuid + if len(device_controller_uuid) > 0: + MSG = ( + 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' + 'SIMAP should be updated by remote controller: {:s}' + ) + str_device_event = grpc_message_to_json_string(device_event) + str_device = grpc_message_to_json_string(device) + LOGGER.warning(MSG.format(str_device_event, str_device)) + return topology_uuid, endpoint_names = get_device_endpoint(device) topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) @@ -173,6 +260,7 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() + device_name = device.name te_device = te_topo.node(device_name) for endpoint_name in endpoint_names: te_device.termination_point(endpoint_name).delete() -- GitLab From 162f741dcdfc473aaa5d5e99a5827057704e16ee Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 13:41:08 +0000 Subject: [PATCH 040/367] ECOC F5GA Telemetry Demo: - Corrected underlay controller ports on topology descriptors - Corrected deploy support services script --- src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json | 2 +- src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json | 2 +- src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index fd3b99118..2edc3f1d8 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -18,7 +18,7 @@ {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "name": "NCE-T", "device_type": "ip-sdn-controller", "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8444" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json index 74e4950cd..d8634caf0 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -18,7 +18,7 @@ {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "name": "NCE-FAN", "device_type": "ip-sdn-controller", "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh index 9a0ee1630..35ad81574 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh +++ 
b/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh @@ -33,8 +33,8 @@ docker rm --force nce-t-ctrl # Create SIMAP Server, NCE-FAN Controller, NCE-T Controller docker run --detach --name simap-server --publish 8080:8080 simap-server:mock -docker run --detach --name nce-fan-ctrl --publish 8081:8080 nce-fan-ctrl:mock -docker run --detach --name nce-t-ctrl --publish 8082:8080 nce-t-ctrl:mock +docker run --detach --name nce-fan-ctrl --publish 8443:8443 nce-fan-ctrl:mock +docker run --detach --name nce-t-ctrl --publish 8444:8443 nce-t-ctrl:mock sleep 2 -- GitLab From b6e6d29f0c2ac08fb8cb1a8564d342bfb325e3f5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 27 Aug 2025 13:54:21 +0000 Subject: [PATCH 041/367] Context component: - Corrected warning messages in events from Context component --- src/context/service/database/Events.py | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/src/context/service/database/Events.py b/src/context/service/database/Events.py index 06725807b..decd7f005 100644 --- a/src/context/service/database/Events.py +++ b/src/context/service/database/Events.py @@ -102,21 +102,29 @@ def consume_events( # LOGGER.warning(MSG.format(str(message.topic))) # continue data = json.loads(message.content) - if 'context_id' in data and EventTopicEnum.CONTEXT in topic_enums: + if 'context_id' in data: + if EventTopicEnum.CONTEXT not in topic_enums: continue yield ContextEvent(**data) - elif 'topology_id' in data and EventTopicEnum.TOPOLOGY in topic_enums: + elif 'topology_id' in data: + if EventTopicEnum.TOPOLOGY not in topic_enums: continue yield TopologyEvent(**data) - elif 'device_id' in data and EventTopicEnum.DEVICE in topic_enums: + elif 'device_id' in data: + if EventTopicEnum.DEVICE not in topic_enums: continue yield DeviceEvent(**data) - elif 'opticalconfig_id' in data and EventTopicEnum.OPTICALCONFIG in topic_enums: + elif 'opticalconfig_id' in data: + if EventTopicEnum.OPTICALCONFIG not in topic_enums: continue yield DeviceEvent(**data) - elif 'link_id' in data and EventTopicEnum.LINK in topic_enums: + elif 'link_id' in data: + if EventTopicEnum.LINK not in topic_enums: continue yield LinkEvent(**data) - elif 'service_id' in data and EventTopicEnum.SERVICE in topic_enums: + elif 'service_id' in data: + if EventTopicEnum.SERVICE not in topic_enums: continue yield ServiceEvent(**data) - elif 'slice_id' in data and EventTopicEnum.SLICE in topic_enums: + elif 'slice_id' in data: + if EventTopicEnum.SLICE not in topic_enums: continue yield SliceEvent(**data) - elif 'connection_id' in data and EventTopicEnum.CONNECTION in topic_enums: + elif 'connection_id' in data: + if EventTopicEnum.CONNECTION not in topic_enums: continue yield ConnectionEvent(**data) else: MSG = 'Unable to identify EventClass for Message({:s}). Ignoring...' 
-- GitLab From b1de78eb43ff1f8eac6bca1c281d4cce5d8fbca2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 08:37:53 +0000 Subject: [PATCH 042/367] ECOC F5GA Telemetry Demo: - Corrected agg controller topology descriptor --- .../data/topology-agg.json | 71 ++++++++++++++++--- 1 file changed, 61 insertions(+), 10 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index 2edc3f1d8..5d23fdd5c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -34,14 +34,43 @@ {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "device_type": "emu-datacenter", + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "name": "172.16.204.221", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "name": "172.1.101.22", "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "name": "172.16.204.220", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "name": "172.1.201.22", "device_type": "emu-datacenter", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500a", "name": "500a", "type": "copper"}, - {"uuid": "500b", "name": "500b", "type": "copper"} + {"uuid": 
"lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"} ]}}} ]} } @@ -72,23 +101,45 @@ {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}} + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "500"}} ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500a"}}, "name": "172.16.204.22x-500a", + {"link_id": {"link_uuid": {"uuid": "172.16.204.221-500"}}, "name": "172.16.204.221-500", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "500"}}, {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500b"}}, "name": "172.16.204.22x-500b", + {"link_id": {"link_uuid": {"uuid": "172.16.204.220-500"}}, "name": "172.16.204.220-500", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "500"}}, {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} ]}, {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}} + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.221-200"}}, "name": "172.16.204.221-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.1.101.22-500"}}, "name": "172.1.101.22-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.220-200"}}, "name": "172.16.204.220-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.1.201.22-500"}}, "name": "172.1.201.22-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": 
"172.1.201.22"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "200"}} ]} ] } -- GitLab From d4b728395cb98a62bcc5f2e2f404d5a8e72b4bbc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 08:56:47 +0000 Subject: [PATCH 043/367] ECOC F5GA Telemetry Demo: - Updated deploy scripts --- .../redeploy-support-mocks.sh | 44 ---------- .../ecoc25-f5ga-telemetry/redeploy-tfs.sh | 28 ------- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 84 +++++++++++++++++++ 3 files changed, 84 insertions(+), 72 deletions(-) delete mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh delete mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/redeploy.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh deleted file mode 100755 index 35ad81574..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-support-mocks.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Build SIMAP Server -cd ~/tfs-ctrl/src/tests/tools/simap_server -docker buildx build -t simap-server:mock -f Dockerfile . - -# Build NCE-FAN Controller -cd ~/tfs-ctrl/src/tests/tools/mock_nce_ctrl -docker buildx build -t nce-fan-ctrl:mock -f Dockerfile . - -# Build NCE-T Controller -cd ~/tfs-ctrl/src/tests/tools/mock_ietf_actn_sdn_ctrl -docker buildx build -t nce-t-ctrl:mock -f Dockerfile . - - -# Cleanup -docker rm --force simap-server -docker rm --force nce-fan-ctrl -docker rm --force nce-t-ctrl - -# Create SIMAP Server, NCE-FAN Controller, NCE-T Controller -docker run --detach --name simap-server --publish 8080:8080 simap-server:mock -docker run --detach --name nce-fan-ctrl --publish 8443:8443 nce-fan-ctrl:mock -docker run --detach --name nce-t-ctrl --publish 8444:8443 nce-t-ctrl:mock - -sleep 2 - -# Dump Docker containers -docker ps -a - -echo "Bye!" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh deleted file mode 100755 index 6e4961af0..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy-tfs.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Assuming the instances are named as tfs-e2e-ctrl, tfs-agg-ctrl, and tfs-ip-ctrl -#CTRL_NAME=$(hostname | cut -d'-' -f2) -#echo "Deploying: ${CTRL_NAME}" - -#source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-${CTRL_NAME}.sh -source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh -./deploy/all.sh - -echo "Waiting for NATS connection..." -while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done -kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server - -echo "Ready!" diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh new file mode 100755 index 000000000..47d14ecd8 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -0,0 +1,84 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Deploying in ${HOSTNAME}..." + + +case "$HOSTNAME" in + simap-server) + echo "Building SIMAP Server..." + cd ~/tfs-ctrl/src/tests/tools/simap_server + docker buildx build -t simap-server:mock -f Dockerfile . + + echo "Building NCE-FAN Controller..." + cd ~/tfs-ctrl/src/tests/tools/mock_nce_ctrl + docker buildx build -t nce-fan-ctrl:mock -f Dockerfile . + + echo "Building NCE-T Controller..." + cd ~/tfs-ctrl/src/tests/tools/mock_ietf_actn_sdn_ctrl + docker buildx build -t nce-t-ctrl:mock -f Dockerfile . + + echo "Cleaning up..." + docker rm --force simap-server + docker rm --force nce-fan-ctrl + docker rm --force nce-t-ctrl + + echo "Deploying support services..." + docker run --detach --name simap-server --publish 8080:8080 simap-server:mock + docker run --detach --name nce-fan-ctrl --publish 8443:8443 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8444:8443 nce-t-ctrl:mock + + sleep 2 + docker ps -a + ;; + tfs-e2e-ctrl) + echo "Deploying TFS E2E Controller..." + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + tfs-agg-ctrl) + echo "Deploying TFS Agg Controller..." + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + tfs-ip-ctrl) + echo "Deploying TFS IP Controller..." 
+ source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No commands to run." + ;; +esac + +echo "Ready!" -- GitLab From ab1836d5a14830d833bedd808033596b9cdb4d4a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 18:53:55 +0000 Subject: [PATCH 044/367] Context & WebUI components: - Added bidirectionality to links - Added attribute --- proto/context.proto | 16 +++++++++------- src/context/service/database/Link.py | 6 ++++++ src/context/service/database/models/LinkModel.py | 8 +++++--- .../service/database/models/enums/LinkType.py | 1 + src/webui/service/templates/link/home.html | 10 ++++++++++ 5 files changed, 31 insertions(+), 10 deletions(-) diff --git a/proto/context.proto b/proto/context.proto index b33750e80..0fdb64405 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -268,17 +268,19 @@ message LinkId { } enum LinkTypeEnum { - LINKTYPE_UNKNOWN = 0; - LINKTYPE_COPPER = 1; - LINKTYPE_FIBER = 2; - LINKTYPE_RADIO = 3; - LINKTYPE_VIRTUAL = 4; + LINKTYPE_UNKNOWN = 0; + LINKTYPE_COPPER = 1; + LINKTYPE_FIBER = 2; + LINKTYPE_RADIO = 3; + LINKTYPE_VIRTUAL = 4; LINKTYPE_MANAGEMENT = 5; + LINKTYPE_REMOTE = 6; // imported from remote topology } message LinkAttributes { - float total_capacity_gbps = 1; - float used_capacity_gbps = 2; + bool is_bidirectional = 1; + float total_capacity_gbps = 2; + float used_capacity_gbps = 3; } message Link { diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index 8176873de..68462b3f1 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -105,12 +105,16 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) topology_uuids.add(endpoint_topology_uuid) total_capacity_gbps, used_capacity_gbps = None, None + is_bidirectional = False if request.HasField('attributes'): attributes = request.attributes # In proto3, HasField() does not work for scalar fields, using ListFields() instead. 
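As the comment notes, proto3 HasField() cannot be used on plain scalar fields, and a scalar left at its default value never appears in ListFields(), so presence is approximated by enumerating the populated fields. A small illustrative helper (hypothetical name, not part of the patch) capturing the idiom:

```python
# Generic presence check for proto3 scalar fields (illustrative helper, not TFS code).
# ListFields() returns only the fields carrying a non-default value, so a scalar left
# at its default (e.g. 0.0 or False) is treated here as "not provided".
def get_scalar_if_set(message, field_name : str, default=None):
    present = {descriptor.name for descriptor, _ in message.ListFields()}
    return getattr(message, field_name) if field_name in present else default

# Assumed usage with the LinkAttributes message defined in context.proto:
#   total_capacity_gbps = get_scalar_if_set(request.attributes, 'total_capacity_gbps')
#   used_capacity_gbps  = get_scalar_if_set(request.attributes, 'used_capacity_gbps')
```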
attribute_names = set([field.name for field,_ in attributes.ListFields()]) + if 'is_bidirectional' in attribute_names: + is_bidirectional = attributes.is_bidirectional + if 'total_capacity_gbps' in attribute_names: total_capacity_gbps = attributes.total_capacity_gbps @@ -125,6 +129,7 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) 'link_type' : link_type, 'total_capacity_gbps' : total_capacity_gbps, 'used_capacity_gbps' : used_capacity_gbps, + 'is_bidirectional' : is_bidirectional, 'created_at' : now, 'updated_at' : now, }] @@ -138,6 +143,7 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) link_type = stmt.excluded.link_type, total_capacity_gbps = stmt.excluded.total_capacity_gbps, used_capacity_gbps = stmt.excluded.used_capacity_gbps, + is_bidirectional = stmt.excluded.is_bidirectional, updated_at = stmt.excluded.updated_at, ) ) diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index 77a9d9fa4..ff95df288 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -14,7 +14,8 @@ import operator from sqlalchemy import ( - CheckConstraint, Column, DateTime, Enum, Float, ForeignKey, Integer, String + CheckConstraint, Boolean, Column, DateTime, Enum, Float, ForeignKey, + Integer, String ) from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship @@ -31,6 +32,7 @@ class LinkModel(_Base): link_type = Column(Enum(ORM_LinkTypeEnum), nullable=False) total_capacity_gbps = Column(Float, nullable=True) used_capacity_gbps = Column(Float, nullable=True) + is_bidirectional = Column(Boolean, default=False) created_at = Column(DateTime, nullable=False) updated_at = Column(DateTime, nullable=False) @@ -57,11 +59,11 @@ class LinkModel(_Base): } if self.link_type is None: self.link_type = LinkTypeEnum.LINKTYPE_UNKNOWN + attributes : Dict = result.setdefault('attributes', dict()) + attributes.setdefault('is_bidirectional', self.is_bidirectional) if self.total_capacity_gbps is not None: - attributes : Dict = result.setdefault('attributes', dict()) attributes.setdefault('total_capacity_gbps', self.total_capacity_gbps) if self.used_capacity_gbps is not None: - attributes : Dict = result.setdefault('attributes', dict()) attributes.setdefault('used_capacity_gbps', self.used_capacity_gbps) return result diff --git a/src/context/service/database/models/enums/LinkType.py b/src/context/service/database/models/enums/LinkType.py index 6571b19a9..01e77f248 100644 --- a/src/context/service/database/models/enums/LinkType.py +++ b/src/context/service/database/models/enums/LinkType.py @@ -28,6 +28,7 @@ class ORM_LinkTypeEnum(enum.Enum): RADIO = LinkTypeEnum.LINKTYPE_RADIO VIRTUAL = LinkTypeEnum.LINKTYPE_VIRTUAL MANAGEMENT = LinkTypeEnum.LINKTYPE_MANAGEMENT + REMOTE = LinkTypeEnum.LINKTYPE_REMOTE grpc_to_enum__link_type_enum = functools.partial( grpc_to_enum, LinkTypeEnum, ORM_LinkTypeEnum diff --git a/src/webui/service/templates/link/home.html b/src/webui/service/templates/link/home.html index d8e2522ae..acc7091d2 100644 --- a/src/webui/service/templates/link/home.html +++ b/src/webui/service/templates/link/home.html @@ -46,6 +46,7 @@ Name Type Endpoints + Attributes @@ -79,6 +80,15 @@ {% endfor %} + +
    + {% for field_descriptor, field_value in link.attributes.ListFields() %} +
+ {{ field_descriptor.name }} = {{ field_value }} +
+ {% endfor %} +
+ -- GitLab From eef68e8b52a152a1e25e38898f8e3efbd127292a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 18:55:44 +0000 Subject: [PATCH 045/367] Common tools: - Renamed RestClient to RestApiClient - Added RestConfClient --- .../{RestClient.py => RestApiClient.py} | 35 ++++-- src/common/tools/client/RestConfClient.py | 104 ++++++++++++++++++ .../drivers/ietf_l2vpn/TfsApiClient.py | 4 +- .../drivers/ietf_l3vpn/TfsApiClient.py | 4 +- .../drivers/optical_tfs/TfsApiClient.py | 4 +- .../drivers/optical_tfs/TfsOpticalClient.py | 4 +- 6 files changed, 140 insertions(+), 15 deletions(-) rename src/common/tools/client/{RestClient.py => RestApiClient.py} (90%) create mode 100644 src/common/tools/client/RestConfClient.py diff --git a/src/common/tools/client/RestClient.py b/src/common/tools/client/RestApiClient.py similarity index 90% rename from src/common/tools/client/RestClient.py rename to src/common/tools/client/RestApiClient.py index 89717dbc0..1a85f91da 100644 --- a/src/common/tools/client/RestClient.py +++ b/src/common/tools/client/RestApiClient.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. + import enum, logging, requests from requests.auth import HTTPBasicAuth from typing import Any, Optional, Set + class RestRequestMethod(enum.Enum): GET = 'get' POST = 'post' @@ -23,6 +25,7 @@ class RestRequestMethod(enum.Enum): PATCH = 'patch' DELETE = 'delete' + EXPECTED_STATUS_CODES : Set[int] = { requests.codes['OK' ], # 200 - OK requests.codes['CREATED' ], # 201 - Created @@ -30,7 +33,6 @@ EXPECTED_STATUS_CODES : Set[int] = { requests.codes['NO_CONTENT'], # 204 - No Content } -URL_TEMPLATE = '{:s}://{:s}:{:d}/{:s}' def compose_basic_auth( username : Optional[str] = None, password : Optional[str] = None @@ -38,19 +40,24 @@ def compose_basic_auth( if username is None or password is None: return None return HTTPBasicAuth(username, password) + class SchemeEnum(enum.Enum): HTTP = 'http' HTTPS = 'https' + def check_scheme(scheme : str) -> str: str_scheme = str(scheme).lower() enm_scheme = SchemeEnum._value2member_map_[str_scheme] return enm_scheme.value -class RestClient: +TEMPLATE_URL = '{:s}://{:s}:{:d}/{:s}' + + +class RestApiClient: def __init__( - self, address : str, port : int, scheme : str = 'http', + self, address : str, port : int = 8080, scheme : str = 'http', base_url : str = '', username : Optional[str] = None, password : Optional[str] = None, timeout : int = 30, verify_certs : bool = True, allow_redirects : bool = True, logger : Optional[logging.Logger] = None @@ -58,15 +65,13 @@ class RestClient: self._address = address self._port = int(port) self._scheme = check_scheme(scheme) + self._base_url = base_url self._auth = compose_basic_auth(username=username, password=password) self._timeout = int(timeout) self._verify_certs = verify_certs self._allow_redirects = allow_redirects self._logger = logger - def _compose_url(self, endpoint : str) -> str: - endpoint = endpoint.lstrip('/') - return URL_TEMPLATE.format(self._scheme, self._address, self._port, endpoint) def _log_msg_request( self, method : RestRequestMethod, request_url : str, body : Optional[Any], @@ -77,6 +82,7 @@ class RestClient: if self._logger is not None: self._logger.log(log_level, msg) return msg + def _log_msg_check_reply( self, method : RestRequestMethod, request_url : str, body : Optional[Any], reply : requests.Response, expected_status_codes : Set[int], @@ -94,12 +100,20 @@ class RestClient: self._logger.error(msg) raise 
Exception(msg) + def _do_rest_request( self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES ) -> Optional[Any]: - request_url = self._compose_url(endpoint) + candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__]) + if endpoint.lower().startswith(candidate_schemes): + request_url = endpoint.lstrip('/') + else: + endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') + request_url = TEMPLATE_URL.format(self._scheme, self._address, self._port, endpoint) + self._log_msg_request(method, request_url, body) + try: headers = {'accept': 'application/json'} reply = requests.request( @@ -112,10 +126,13 @@ class RestClient: msg = MSG.format(str(method.value).upper(), request_url, str(body)) self._logger.exception(msg) raise Exception(msg) from e + self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) + if reply.content and len(reply.content) > 0: return reply.json() return None + def get( self, endpoint : str, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES @@ -125,6 +142,7 @@ class RestClient: expected_status_codes=expected_status_codes ) + def post( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES @@ -134,6 +152,7 @@ class RestClient: expected_status_codes=expected_status_codes ) + def put( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES @@ -143,6 +162,7 @@ class RestClient: expected_status_codes=expected_status_codes ) + def patch( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES @@ -152,6 +172,7 @@ class RestClient: expected_status_codes=expected_status_codes ) + def delete( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES diff --git a/src/common/tools/client/RestConfClient.py b/src/common/tools/client/RestConfClient.py new file mode 100644 index 000000000..1cd84c218 --- /dev/null +++ b/src/common/tools/client/RestConfClient.py @@ -0,0 +1,104 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, requests +from typing import Any, Dict, Optional, Set +from .RestApiClient import RestApiClient + + +HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' + + +class RestConfClient(RestApiClient): + def __init__( + self, address : str, port : int = 8080, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : int = 30, verify_certs : bool = True, allow_redirects : bool = True, + logger : Optional[logging.Logger] = None + ) -> None: + super().__init__( + address, port=port, scheme=scheme, username=username, password=password, + timeout=timeout, verify_certs=verify_certs, allow_redirects=allow_redirects, + logger=logger + ) + + self._discover_base_url() + + def _discover_base_url(self) -> None: + host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) + host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) + + links = host_meta.get('links') + if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') + if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') + if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') + + link = links[0] + if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') + + rel = link.get('rel') + if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') + if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') + if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') + + href = link.get('href') + if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') + if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') + + self._base_url = str(href + '/data').replace('//', '/') + + def get( + self, endpoint : str, + expected_status_codes : Set[int] = {requests.codes['OK']} + ) -> Optional[Any]: + return super().get( + endpoint, + expected_status_codes=expected_status_codes + ) + + def post( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED']} + ) -> Optional[Any]: + return super().post( + endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def put( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return super().put( + endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def patch( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return super().patch( + endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def delete( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return super().delete( + endpoint, body=body, + expected_status_codes=expected_status_codes + ) diff --git a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py index cc124b02c..ed8367e60 100644 --- a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py @@ -14,7 +14,7 @@ import logging, requests from typing import Dict, List, Optional -from 
common.tools.client.RestClient import RestClient +from common.tools.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' @@ -51,7 +51,7 @@ MAPPING_DRIVER = { LOGGER = logging.getLogger(__name__) -class TfsApiClient(RestClient): +class TfsApiClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', username : Optional[str] = None, password : Optional[str] = None, diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index f8a22d0a3..f379d56b8 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -14,7 +14,7 @@ import logging, requests from typing import Dict, List, Optional -from common.tools.client.RestClient import RestClient +from common.tools.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' @@ -52,7 +52,7 @@ MAPPING_DRIVER = { LOGGER = logging.getLogger(__name__) -class TfsApiClient(RestClient): +class TfsApiClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', username : Optional[str] = None, password : Optional[str] = None, diff --git a/src/device/service/drivers/optical_tfs/TfsApiClient.py b/src/device/service/drivers/optical_tfs/TfsApiClient.py index 854154be6..f60edd4fc 100644 --- a/src/device/service/drivers/optical_tfs/TfsApiClient.py +++ b/src/device/service/drivers/optical_tfs/TfsApiClient.py @@ -16,7 +16,7 @@ import logging from typing import Dict, List, Optional, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum -from common.tools.client.RestClient import RestClient +from common.tools.client.RestApiClient import RestApiClient from common.tools.object_factory.Constraint import json_constraint_custom from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Device import json_device_id @@ -59,7 +59,7 @@ MAPPING_DRIVER = { LOGGER = logging.getLogger(__name__) -class TfsApiClient(RestClient): +class TfsApiClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', username : Optional[str] = None, password : Optional[str] = None, diff --git a/src/device/service/drivers/optical_tfs/TfsOpticalClient.py b/src/device/service/drivers/optical_tfs/TfsOpticalClient.py index 07bf615c3..648e7e596 100644 --- a/src/device/service/drivers/optical_tfs/TfsOpticalClient.py +++ b/src/device/service/drivers/optical_tfs/TfsOpticalClient.py @@ -15,7 +15,7 @@ import logging, requests from typing import Dict, List, Optional, Union -from common.tools.client.RestClient import RestClient +from common.tools.client.RestApiClient import RestApiClient LOGGER = logging.getLogger(__name__) @@ -26,7 +26,7 @@ ADD_LIGHTPATH_URL = '/OpticalTFS/AddLightpath/{src_node:s}/{dst_node:s}/{bit DEL_LIGHTPATH_URL = '/OpticalTFS/DelLightpath/{flow_id:s}/{src_node:s}/{dst_node:s}/{bitrate:s}' -class TfsOpticalClient(RestClient): +class TfsOpticalClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', username : Optional[str] = None, password : Optional[str] = None, -- GitLab From b803da4122f35ba6233c2cc7f9b6d1375e83e5a6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 
18:59:24 +0000 Subject: [PATCH 046/367] Device component: - Extended IETF ACTN driver to use network topology to discover underlaying topologies. --- .../drivers/ietf_actn/IetfActnDriver.py | 13 +- .../handlers/NetworkTopologyHandler.py | 199 ++++++++++++++++++ 2 files changed, 208 insertions(+), 4 deletions(-) create mode 100644 src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index 431674e4e..e78dc7976 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -15,10 +15,12 @@ import json, logging, requests, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.tools.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_string, chk_type from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from .handlers.EthtServiceHandler import EthtServiceHandler from .handlers.OsuTunnelHandler import OsuTunnelHandler +from .handlers.NetworkTopologyHandler import NetworkTopologyHandler from .handlers.RestApiClient import RestApiClient from .Tools import get_etht_services, get_osu_tunnels, parse_resource_key @@ -39,8 +41,10 @@ class IetfActnDriver(_Driver): self.__started = threading.Event() self.__terminate = threading.Event() self._rest_api_client = RestApiClient(address, port, settings=settings) - self._handler_osu_tunnel = OsuTunnelHandler(self._rest_api_client) + self._rest_conf_client = RestConfClient(address, port, **settings) self._handler_etht_service = EthtServiceHandler(self._rest_api_client) + self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_client, **settings) + self._handler_osu_tunnel = OsuTunnelHandler(self._rest_api_client) def Connect(self) -> bool: with self.__lock: @@ -81,9 +85,10 @@ class IetfActnDriver(_Driver): if resource_key == RESOURCE_ENDPOINTS: # Add mgmt endpoint by default - resource_key = '/endpoints/endpoint[mgmt]' - resource_value = {'uuid': 'mgmt', 'name': 'mgmt', 'type': 'mgmt'} - results.append((resource_key, resource_value)) + #resource_key = '/endpoints/endpoint[mgmt]' + #resource_value = {'uuid': 'mgmt', 'name': 'mgmt', 'type': 'mgmt'} + #results.append((resource_key, resource_value)) + results.extend(self._handler_net_topology.get()) elif resource_key == RESOURCE_SERVICES: get_osu_tunnels(self._handler_osu_tunnel, _results) get_etht_services(self._handler_etht_service, _results) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py new file mode 100644 index 000000000..7c569d8bd --- /dev/null +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -0,0 +1,199 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, requests +from typing import Dict, List, Optional, Tuple, Union +from common.Constants import DEFAULT_TOPOLOGY_NAME +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ( + DEVICEDRIVER_UNDEFINED, DEVICEOPERATIONALSTATUS_DISABLED, + DEVICEOPERATIONALSTATUS_ENABLED, DeviceOperationalStatusEnum +) +from common.tools.client.RestApiClient import RestApiClient +from common.tools.client.RestConfClient import RestConfClient +from device.service.driver_api.ImportTopologyEnum import ( + ImportTopologyEnum, get_import_topology +) +from .RestApiClient import ( + HTTP_STATUS_CREATED, HTTP_STATUS_NO_CONTENT, HTTP_STATUS_OK, + RestApiClient +) + + +GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' +GET_DEVICES_URL = '/tfs-api/devices' +GET_LINKS_URL = '/tfs-api/links' +L3VPN_URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services' + + +MAPPING_STATUS = { + 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, + 'DEVICEOPERATIONALSTATUS_DISABLED' : 1, + 'DEVICEOPERATIONALSTATUS_ENABLED' : 2, +} + + +MAPPING_DRIVER = { + 'DEVICEDRIVER_UNDEFINED' : 0, + 'DEVICEDRIVER_OPENCONFIG' : 1, + 'DEVICEDRIVER_TRANSPORT_API' : 2, + 'DEVICEDRIVER_P4' : 3, + 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, + 'DEVICEDRIVER_ONF_TR_532' : 5, + 'DEVICEDRIVER_XR' : 6, + 'DEVICEDRIVER_IETF_L2VPN' : 7, + 'DEVICEDRIVER_GNMI_OPENCONFIG' : 8, + 'DEVICEDRIVER_OPTICAL_TFS' : 9, + 'DEVICEDRIVER_IETF_ACTN' : 10, + 'DEVICEDRIVER_OC' : 11, + 'DEVICEDRIVER_QKD' : 12, + 'DEVICEDRIVER_IETF_L3VPN' : 13, + 'DEVICEDRIVER_IETF_SLICE' : 14, + 'DEVICEDRIVER_NCE' : 15, + 'DEVICEDRIVER_SMARTNIC' : 16, + 'DEVICEDRIVER_MORPHEUS' : 17, + 'DEVICEDRIVER_RYU' : 18, +} + + +LOGGER = logging.getLogger(__name__) + + +class NetworkTopologyHandler: + def __init__(self, rest_conf_client : RestConfClient, **settings) -> None: + self._rest_conf_client = rest_conf_client + self._object_name = 'NetworkTopology' + self._subpath_root = '/ietf-network:networks' + self._subpath_item = self._subpath_root + '/network="{network_id:s}"' + + # Options are: + # disabled --> just import endpoints as usual + # devices --> imports sub-devices but not links connecting them. + # (a remotely-controlled transport domain might exist between them) + # topology --> imports sub-devices and links connecting them. + # (not supported by XR driver) + self._import_topology = get_import_topology(settings, default=ImportTopologyEnum.TOPOLOGY) + + + def get(self, network_id : Optional[str] = None) -> List[Dict]: + if network_id is None: network_id = DEFAULT_TOPOLOGY_NAME + endpoint = self._subpath_item.format(network_id=network_id) + networks = self._rest_conf_client.get(endpoint) + + if 'ietf-network:networks' not in networks: + raise Exception('Malformed reply. 
"ietf-network:networks" missing') + networks = networks['ietf-network:networks'] + + if 'network' not in networks: return list() + networks = networks['network'] + if len(networks) == 0: return list() + + network = next(iter([ + n for n in networks if n['network-id'] == network_id + ]), default=None) + + if network is None: + raise Exception('Network({:s}) not found'.format(str(network_id))) + + MSG = '[get] import_topology={:s}' + LOGGER.debug(MSG.format(str(self._import_topology))) + + result = list() + if self._import_topology == ImportTopologyEnum.DISABLED: + LOGGER.debug('[get] abstract controller; returning') + return result + + device_type = DeviceTypeEnum.EMULATED_PACKET_SWITCH.value + endpoint_type = '' + if 'network-types' in network: + nnt = network['network-types'] + if 'ietf-te-topology:te-topology' in nnt: + nnt_tet = nnt['ietf-te-topology:te-topology'] + if 'ietf-otn-topology:otn-topology' in nnt_tet: + device_type = DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value + endpoint_type = 'optical' + elif 'ietf-eth-te-topology:eth-tran-topology' in nnt_tet: + device_type = DeviceTypeEnum.EMULATED_PACKET_SWITCH.value + endpoint_type = 'copper' + elif 'ietf-l3-unicast-topology:l3-unicast-topology' in nnt_tet: + device_type = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value + endpoint_type = 'copper' + + for node in network['node']: + node_id = node['node-id'] + + node_name = node_id + node_is_up = True + if 'ietf-te-topology:te' in node: + nte = node['ietf-te-topology:te'] + + if 'oper-status' in nte: + node_is_up = nte['oper-status'] == 'up' + + if 'te-node-attributes' in nte: + ntea = nte['te-node-attributes'] + if 'name' in ntea: + node_name = ntea['name'] + + device_url = '/devices/device[{:s}]'.format(node_id) + device_data = { + 'uuid': node_id, + 'name': node_name, + 'type': device_type, + 'status': DEVICEOPERATIONALSTATUS_ENABLED if node_is_up else DEVICEOPERATIONALSTATUS_DISABLED, + 'drivers': [DEVICEDRIVER_UNDEFINED], + } + result.append((device_url, device_data)) + + for tp in node['ietf-network-topology:termination-point']: + tp_id = tp['tp-id'] + + tp_name = tp_id + if 'ietf-te-topology:te' in tp: + tpte = tp['ietf-te-topology:te'] + if 'name' in tpte: + tp_name = tpte['name'] + + endpoint_url = '/endpoints/endpoint[{:s}, {:s}]'.format(node_id, tp_id) + endpoint_data = { + 'device_uuid': node_id, + 'uuid': tp_id, + 'name': tp_name, + 'type': endpoint_type, + } + result.append((endpoint_url, endpoint_data)) + + if self._import_topology == ImportTopologyEnum.DEVICES: + LOGGER.debug('[get] devices only; returning') + return result + +# for json_link in links['links']: +# link_uuid : str = json_link['link_id']['link_uuid']['uuid'] +# link_url = '/links/link[{:s}]'.format(link_uuid) +# link_endpoint_ids = [ +# ( +# json_endpoint_id['device_id']['device_uuid']['uuid'], +# json_endpoint_id['endpoint_uuid']['uuid'], +# ) +# for json_endpoint_id in json_link['link_endpoint_ids'] +# ] +# link_data = { +# 'uuid': json_link['link_id']['link_uuid']['uuid'], +# 'name': json_link['name'], +# 'endpoints': link_endpoint_ids, +# } +# result.append((link_url, link_data)) + + LOGGER.debug('[get] topology; returning') + return result -- GitLab From 703770b43710dc825485cf2e854be26f57b4c762 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 19:01:23 +0000 Subject: [PATCH 047/367] Tests - Tools - Mock NCE-T Controller - Implemented new mock of NCE-T controller based on libyang - Added support for automatic YANG model discovery and load - Keeps a consistent in-memory database - Is 
initialized with an example startup data --- src/tests/tools/mock_nce_t_ctrl/Dockerfile | 61 + src/tests/tools/mock_nce_t_ctrl/README.md | 23 + src/tests/tools/mock_nce_t_ctrl/build.sh | 21 + src/tests/tools/mock_nce_t_ctrl/deploy.sh | 27 + src/tests/tools/mock_nce_t_ctrl/destroy.sh | 22 + .../mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py | 148 + .../mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py | 50 + .../nce_t_ctrl/HttpStatusCodesEnum.py | 27 + .../mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py | 144 + .../nce_t_ctrl/YangModelDiscoverer.py | 195 + .../mock_nce_t_ctrl/nce_t_ctrl/__init__.py | 14 + .../mock_nce_t_ctrl/nce_t_ctrl/__main__.py | 26 + .../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 70 + .../tools/mock_nce_t_ctrl/requirements.in | 25 + .../mock_nce_t_ctrl/run_ctrl_gunicorn.sh | 20 + .../mock_nce_t_ctrl/run_ctrl_standalone.sh | 19 + src/tests/tools/mock_nce_t_ctrl/startup.json | 63 + .../ietf-eth-tran-service.yang | 1010 ++++ .../ietf-eth-tran-types.yang | 460 ++ .../ietf-trans-client-service.yang | 325 ++ .../ietf-trans-client-svc-types.yang | 63 + .../ietf-eth-te-topology.yang | 2278 +++++++++ .../ietf-otn-topology.yang | 2230 ++++++++ .../ietf-te-packet-types.yang | 835 +++ .../ietf-te-types.yang | 4473 +++++++++++++++++ .../ietf-te-device.yang | 595 +++ .../draft-ietf-teas-yang-te-34/ietf-te.yang | 1516 ++++++ .../draft-layer1-types/ietf-layer1-types.yang | 1361 +++++ .../yang/rfc6991/ietf-inet-types.yang | 458 ++ .../yang/rfc6991/ietf-yang-types.yang | 474 ++ .../yang/rfc8294/iana-routing-types.yang | 471 ++ .../yang/rfc8294/ietf-routing-types.yang | 771 +++ .../yang/rfc8343/ietf-interfaces.yang | 1123 +++++ .../yang/rfc8345/ietf-network-topology.yang | 294 ++ .../yang/rfc8345/ietf-network.yang | 192 + .../rfc8346/ietf-l3-unicast-topology.yang | 359 ++ .../yang/rfc8795/ietf-te-topology.yang | 1952 +++++++ .../mock_nce_t_ctrl/yang/yang-repo-url.txt | 1 + 38 files changed, 22196 insertions(+) create mode 100644 src/tests/tools/mock_nce_t_ctrl/Dockerfile create mode 100644 src/tests/tools/mock_nce_t_ctrl/README.md create mode 100755 src/tests/tools/mock_nce_t_ctrl/build.sh create mode 100755 src/tests/tools/mock_nce_t_ctrl/deploy.sh create mode 100755 src/tests/tools/mock_nce_t_ctrl/destroy.sh create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__init__.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__main__.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/requirements.in create mode 100755 src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh create mode 100755 src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh create mode 100644 src/tests/tools/mock_nce_t_ctrl/startup.json create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang create mode 100644 
src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-inet-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-yang-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/iana-routing-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/ietf-routing-types.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8343/ietf-interfaces.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network-topology.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/rfc8795/ietf-te-topology.yang create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/yang-repo-url.txt diff --git a/src/tests/tools/mock_nce_t_ctrl/Dockerfile b/src/tests/tools/mock_nce_t_ctrl/Dockerfile new file mode 100644 index 000000000..a1f70694e --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/Dockerfile @@ -0,0 +1,61 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install git build-essential cmake libpcre2-dev python3-dev python3-cffi && \ + rm -rf /var/lib/apt/lists/* + +# Download, build and install libyang. Note that APT package is outdated +# - Ref: https://github.com/CESNET/libyang +# - Ref: https://github.com/CESNET/libyang-python/ +RUN mkdir -p /var/libyang +RUN git clone https://github.com/CESNET/libyang.git /var/libyang +WORKDIR /var/libyang +RUN git fetch +RUN git checkout v2.1.148 +RUN mkdir -p /var/libyang/build +WORKDIR /var/libyang/build +RUN cmake -D CMAKE_BUILD_TYPE:String="Release" .. 
+RUN make +RUN make install +RUN ldconfig + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/nce_t_ctrl/ +WORKDIR /var/teraflow/nce_t_ctrl/ +COPY ./requirements.in ./requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +COPY ./yang/*.yang ./yang/ +COPY ./nce_t_ctrl/*.py ./nce_t_ctrl/ +COPY ./startup.json ./startup.json + +# Configure Flask for production +ENV FLASK_ENV=production + +# Start the service +ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nce_t_ctrl.app:app"] diff --git a/src/tests/tools/mock_nce_t_ctrl/README.md b/src/tests/tools/mock_nce_t_ctrl/README.md new file mode 100644 index 000000000..8d5a4dfbc --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/README.md @@ -0,0 +1,23 @@ +# RESTCONF-based NCE-T Controller + +This server implements a basic RESTCONF Server that can load, potentially, any YANG data model. +In this case, it is prepared to load a NCE-T Controller based on: +- IETF Network Topology +- IETF YANG Data Model for Transport Network Client Signals +- IETF YANG Data Model for Traffic Engineering Tunnels, Label Switched Paths and Interfaces + + +## Build the Docker image +```bash +./build.sh +``` + +## Deploy the Controller +```bash +./deploy.sh +``` + +## Destroy the Controller +```bash +./destroy.sh +``` diff --git a/src/tests/tools/mock_nce_t_ctrl/build.sh b/src/tests/tools/mock_nce_t_ctrl/build.sh new file mode 100755 index 000000000..16b8903bb --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/build.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +docker buildx build -t nce-t-ctrl:test -f Dockerfile . +#docker tag nce-t-ctrl:test localhost:32000/tfs/nce-t-ctrl:test +#docker push localhost:32000/tfs/nce-t-ctrl:test diff --git a/src/tests/tools/mock_nce_t_ctrl/deploy.sh b/src/tests/tools/mock_nce_t_ctrl/deploy.sh new file mode 100755 index 000000000..b4dbfc7a6 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/deploy.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force nce-t-ctrl + +# Create NCE-T Controller +docker run --detach --name nce-t-ctrl --publish 8080:8080 nce-t-ctrl:test + +sleep 2 + +# Dump Docker containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/mock_nce_t_ctrl/destroy.sh b/src/tests/tools/mock_nce_t_ctrl/destroy.sh new file mode 100755 index 000000000..726535128 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/destroy.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force nce-t-ctrl + +# Dump Docker containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py new file mode 100644 index 000000000..319aa9f7b --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py @@ -0,0 +1,148 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import json, logging +from flask import Response, abort, jsonify, request +from flask_restful import Resource +from .HttpStatusCodesEnum import HttpStatusCodesEnum +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class RestConfDispatch(Resource): + def __init__(self, yang_handler : YangHandler) -> None: + super().__init__() + self._yang_handler = yang_handler + + def get(self, subpath : str = '/') -> Response: + data = self._yang_handler.get(subpath) + if data is None: + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, + description='Path({:s}) not found'.format(str(subpath)) + ) + + LOGGER.info('[GET] {:s} => {:s}'.format(subpath, str(data))) + + response = jsonify(json.loads(data)) + response.status_code = HttpStatusCodesEnum.SUCCESS_OK.value + return response + + def post(self, subpath : str) -> Response: + # TODO: client should not provide identifier of element to be created, add it to subpath + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON') + + data = self._yang_handler.get(subpath) + if data is not None: + abort( + HttpStatusCodesEnum.CLI_ERR_CONFLICT.value, + description='Path({:s}) already exists'.format(str(subpath)) + ) + + try: + json_data = self._yang_handler.create(subpath, payload) + except Exception as e: + LOGGER.exception('Create failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + + response = jsonify({'status': 'created'}) + response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED.value + return response + + def put(self, subpath : str) -> Response: + # NOTE: client should provide identifier of element to be created/replaced + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON') + + try: + json_data = self._yang_handler.update(subpath, payload) + except Exception as e: + LOGGER.exception('Update failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + updated = False # TODO: compute if create or update + + response = jsonify({'status': ( + 'updated' if updated else 'created' + )}) + response.status_code = ( + HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + if updated else + HttpStatusCodesEnum.SUCCESS_CREATED.value + ) + return response + + def patch(self, subpath : str) -> Response: + # NOTE: client should provide identifier of element to be patched + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON') + + try: + json_data = self._yang_handler.update(subpath, payload) + except Exception as e: + LOGGER.exception('Update failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + + response = jsonify({'status': 'patched'}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + return response + + def delete(self, subpath : str) -> Response: + # NOTE: client should provide identifier of element to be deleted + + try: + deleted_node = 
self._yang_handler.delete(subpath) + except Exception as e: + LOGGER.exception('Delete failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[DELETE] {:s} => {:s}'.format(subpath, str(deleted_node))) + + if deleted_node is None: + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, + description='Path({:s}) not found'.format(str(subpath)) + ) + + response = jsonify({}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + return response diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py new file mode 100644 index 000000000..95ef34b19 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py @@ -0,0 +1,50 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import xml.etree.ElementTree as ET +from flask import abort, jsonify, make_response, request +from flask_restful import Resource +from .HttpStatusCodesEnum import HttpStatusCodesEnum + +XRD_NS = 'http://docs.oasis-open.org/ns/xri/xrd-1.0' +ET.register_namespace('', XRD_NS) + +class HostMeta(Resource): + def __init__(self, restconf_prefix : str) -> None: + super().__init__() + self._restconf_prefix = restconf_prefix + + def get(self): + best = request.accept_mimetypes.best_match([ + 'application/xrd+xml', 'application/json' + ], default='application/xrd+xml') + + if best == 'application/xrd+xml': + xrd = ET.Element('{{{:s}}}XRD'.format(str(XRD_NS))) + ET.SubElement(xrd, '{{{:s}}}Link'.format(str(XRD_NS)), attrib={ + 'rel': 'restconf', 'href': self._restconf_prefix + }) + xml_string = ET.tostring(xrd, encoding='utf-8', xml_declaration=True).decode() + response = make_response(str(xml_string)) + response.status_code = 200 + response.content_type = best + return response + elif best == 'application/json': + response = jsonify({'links': [{'rel': 'restconf', 'href': self._restconf_prefix}]}) + response.status_code = 200 + response.content_type = best + return response + else: + abort(HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py new file mode 100644 index 000000000..c44d135c0 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py @@ -0,0 +1,27 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import enum
+
+class HttpStatusCodesEnum(enum.IntEnum):
+    SUCCESS_OK = 200
+    SUCCESS_CREATED = 201
+    SUCCESS_ACCEPTED = 202
+    SUCCESS_NO_CONTENT = 204
+    CLI_ERR_BAD_REQUEST = 400
+    CLI_ERR_NOT_FOUND = 404
+    CLI_ERR_NOT_ACCEPTABLE = 406
+    CLI_ERR_CONFLICT = 409
+    SVR_ERR_NOT_IMPLEMENTED = 501
diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py
new file mode 100644
index 000000000..76e5ae6c2
--- /dev/null
+++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py
@@ -0,0 +1,144 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import json, libyang, logging
+from typing import Dict, List, Optional, Type
+
+
+LOGGER = logging.getLogger(__name__)
+
+
+def walk_schema(node : libyang.SNode, path : str = '') -> Dict[str, Type]:
+    schema_paths : Dict[str, Type] = dict()
+    current_path = f'{path}/{node.name()}'
+    schema_paths[current_path] = type(node)
+    for child in node.children():
+        if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue
+        schema_paths.update(walk_schema(child, current_path))
+    return schema_paths
+
+def extract_schema_paths(yang_module : libyang.Module) -> Dict[str, Type]:
+    schema_paths : Dict[str, Type] = dict()
+    for node in yang_module.children():
+        schema_paths.update(walk_schema(node))
+    return schema_paths
+
+class YangHandler:
+    def __init__(
+        self, yang_search_path : str, yang_module_names : List[str],
+        yang_startup_data : Dict
+    ) -> None:
+        self._yang_context = libyang.Context(yang_search_path)
+        self._loaded_modules = set()
+        self._yang_module_paths : Dict[str, Type] = dict()
+        for yang_module_name in yang_module_names:
+            LOGGER.info('Loading module: {:s}'.format(str(yang_module_name)))
+            yang_module = self._yang_context.load_module(yang_module_name)
+            yang_module.feature_enable_all()
+            self._loaded_modules.add(yang_module_name)
+            self._yang_module_paths.update(extract_schema_paths(yang_module))
+
+        self._datastore = self._yang_context.parse_data_mem(
+            json.dumps(yang_startup_data), fmt='json'
+        )
+
+    def destroy(self) -> None:
+        self._yang_context.destroy()
+
+    def get_module_paths(self) -> Dict[str, Type]:
+        return self._yang_module_paths
+
+    def get(self, path : str) -> Optional[str]:
+        if not path.startswith('/'): path = '/' + path
+        data = self._datastore.find_path(path)
+        if data is None: return None
+        json_data = data.print_mem(
+            fmt='json', with_siblings=True, pretty=True,
+            keep_empty_containers=True, include_implicit_defaults=True
+        )
+        return json_data
+
+    def get_xpath(self, xpath : str) -> List[str]:
+        if not xpath.startswith('/'): xpath = '/' + xpath
+        nodes = self._datastore.find_all(xpath)
+        result = list()
+        for node in nodes:
+            result.append(node.print_mem(
+                fmt='json', with_siblings=True, pretty=True,
+                keep_empty_containers=True, include_implicit_defaults=True
+            ))
+
return result + + def create(self, path : str, payload : Dict) -> str: + if not path.startswith('/'): path = '/' + path + # TODO: client should not provide identifier of element to be created, add it to subpath + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) + + dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( + path, parent=self._datastore, value=dnode_parsed, update=False + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def update(self, path : str, payload : Dict) -> str: + if not path.startswith('/'): path = '/' + path + # NOTE: client should provide identifier of element to be updated + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) + + dnode = self._yang_context.create_data_path( + path, parent=self._datastore, value=dnode_parsed, update=True + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def delete(self, path : str) -> Optional[str]: + if not path.startswith('/'): path = '/' + path + + # NOTE: client should provide identifier of element to be deleted + + node : libyang.DNode = self._datastore.find_path(path) + if node is None: return None + + LOGGER.info('node = {:s}'.format(str(node))) + json_data = str(node.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + )) + LOGGER.info('json_data = {:s}'.format(json_data)) + + node.unlink() + node.free() + + return json_data diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py new file mode 100644 index 000000000..f31305280 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py @@ -0,0 +1,195 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
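For context, the YangHandler above can also be driven standalone, outside Flask; the following minimal sketch assumes it is executed from src/tests/tools/mock_nce_t_ctrl (so that ./yang and ./startup.json resolve) and that the libyang bindings listed in requirements.in are installed. The module list is obtained with the YangModuleDiscoverer defined just below, mirroring what app.py does; it is illustrative only, not part of the patch:

# Illustrative sketch: standalone use of YangHandler, mirroring the wiring in app.py.
# Assumes execution from src/tests/tools/mock_nce_t_ctrl so ./yang and ./startup.json resolve.
import json
from nce_t_ctrl.YangHandler import YangHandler
from nce_t_ctrl.YangModelDiscoverer import YangModuleDiscoverer

module_names = YangModuleDiscoverer('./yang').run()    # modules in dependency order

with open('./startup.json', mode='r', encoding='UTF-8') as fp:
    startup_data = json.loads(fp.read())

handler = YangHandler('./yang', module_names, startup_data)
print(sorted(handler.get_module_paths().keys()))       # schema paths per loaded module
print(handler.get('ietf-network:networks'))            # JSON dump of a datastore subtree
handler.destroy()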
+
+
+import logging, re
+from collections import defaultdict
+from graphlib import TopologicalSorter, CycleError
+from pathlib import Path
+from typing import Dict, List, Optional, Set, Tuple
+
+
+COMMENT_SINGLE_RE = re.compile(r"//.*?$", re.MULTILINE)
+COMMENT_MULTI_RE = re.compile(r"/\*.*?\*/", re.DOTALL)
+
+# module / submodule name
+MODNAME_RE = re.compile(r"\b(module|submodule)\s+([A-Za-z0-9_.-]+)\s*\{")
+
+# import foo { ... } (most common form)
+IMPORT_BLOCK_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*\{", re.IGNORECASE)
+
+# import foo; (very rare, but we’ll support it)
+IMPORT_SEMI_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*;", re.IGNORECASE)
+
+
+def _parse_yang_file(path: Path) -> Tuple[Optional[str], Set[str]]:
+    path_stem = path.stem # file name without extension
+    expected_module_name = path_stem.split('@', 1)[0]
+
+    try:
+        data = path.read_text(encoding='utf-8', errors='ignore')
+    except Exception:
+        data = path.read_bytes().decode('utf-8', errors='ignore')
+
+    data = COMMENT_MULTI_RE.sub('', data)
+    data = COMMENT_SINGLE_RE.sub('', data)
+
+    match = MODNAME_RE.search(data)
+    if match is None:
+        return None, set()
+    module_name = match.group(2)
+    if module_name != expected_module_name:
+        MSG = 'Module({:s}) mismatches its FileName({:s})'
+        raise Exception(MSG.format(str(module_name), str(expected_module_name)))
+
+    module_imports = set()
+    if module_name is not None:
+        module_imports.update(IMPORT_BLOCK_RE.findall(data))
+        module_imports.update(IMPORT_SEMI_RE.findall(data))
+
+    # ignore modules importing themselves, just in case
+    module_imports.discard(module_name)
+
+    return module_name, module_imports
+
+
+class YangModuleDiscoverer:
+    def __init__(self, yang_search_path : str) -> None:
+        self._yang_search_path = yang_search_path
+
+        self._module_to_paths : Dict[str, List[Path]] = defaultdict(list)
+        self._module_to_imports : Dict[str, Set[str]] = defaultdict(set)
+        self._ordered_module_names : Optional[List[str]] = None
+
+
+    def run(
+        self, do_print_order : bool = False, do_log_order : bool = False,
+        logger : Optional[logging.Logger] = None, level : int = logging.INFO
+    ) -> List[str]:
+        if self._ordered_module_names is None:
+            self._scan_modules()
+            self._sort_modules()
+
+        if do_print_order:
+            self.print_order()
+
+        if do_log_order:
+            if logger is None: logger = logging.getLogger(__name__)
+            self.log_order(logger, level=level)
+
+        return self._ordered_module_names
+
+    def _scan_modules(self) -> None:
+        yang_root = Path(self._yang_search_path).resolve()
+        if not yang_root.exists():
+            MSG = 'Path({:s}) not found'
+            raise Exception(MSG.format(str(self._yang_search_path)))
+
+        for yang_path in yang_root.rglob('*.yang'):
+            module_name, module_imports = _parse_yang_file(yang_path)
+            if module_name is None: continue
+            self._module_to_paths[module_name].append(yang_path)
+            self._module_to_imports[module_name] = module_imports
+
+        if len(self._module_to_paths) == 0:
+            MSG = 'No modules found in Path({:s})'
+            raise Exception(MSG.format(str(self._yang_search_path)))
+
+        self._check_duplicated_module_declaration()
+        self._check_missing_modules()
+
+
+    def _check_duplicated_module_declaration(self) -> None:
+        duplicate_module_declarations : List[str] = list()
+        for module_name, paths in self._module_to_paths.items():
+            if len(paths) == 1: continue
+            str_paths = [str(p) for p in paths]
+            duplicate_module_declarations.append(
+                '  {:s} => {:s}'.format(module_name, str(str_paths))
+            )
+
+        if len(duplicate_module_declarations) > 0:
+            MSG = 'Duplicate module declarations:\n{:s}'
+ str_dup_mods = '\n'.join(duplicate_module_declarations) + raise Exception(MSG.format(str_dup_mods)) + + + def _check_missing_modules(self) -> None: + local_module_names = set(self._module_to_imports.keys()) + missing_modules : List[str] = list() + for module_name, imported_modules in self._module_to_imports.items(): + missing = imported_modules.difference(local_module_names) + if len(missing) == 0: continue + missing_modules.append( + ' {:s} => {:s}'.format(module_name, str(missing)) + ) + + if len(missing_modules) > 0: + MSG = 'Missing modules:\n{:s}' + str_mis_mods = '\n'.join(missing_modules) + raise Exception(MSG.format(str_mis_mods)) + + + def _sort_modules(self) -> None: + ts = TopologicalSorter() + for module_name, imported_modules in self._module_to_imports.items(): + ts.add(module_name, *imported_modules) + + try: + self._ordered_module_names = list(ts.static_order()) # raises CycleError on cycles + except CycleError as e: + cycle = list(dict.fromkeys(e.args[1])) # de-dup while preserving order + MSG = 'Circular dependencies between modules: {:s}' + raise Exception(MSG.format(str(cycle))) # pylint: disable=raise-missing-from + + + def dump_order(self) -> List[Tuple[int, str, List[str]]]: + if self._ordered_module_names is None: + raise Exception('First process the YANG Modules running method .run()') + + module_order : List[Tuple[int, str, List[str]]] = list() + for i, module_name in enumerate(self._ordered_module_names, 1): + module_imports = sorted(self._module_to_imports[module_name]) + module_order.append((i, module_name, module_imports)) + + return module_order + + + def print_order(self) -> None: + print('Ordered Modules:') + for i, module_name, module_imports in self.dump_order(): + MSG = '{:2d} : {:s} => {:s}' + print(MSG.format(i, module_name, str(module_imports))) + + + def log_order(self, logger : logging.Logger, level : int = logging.INFO) -> None: + logger.log(level, 'Ordered Modules:') + for i, module_name, module_imports in self.dump_order(): + MSG = '{:2d} : {:s} => {:s}' + logger.log(level, MSG.format(i, module_name, str(module_imports))) + + +def main() -> None: + logging.basicConfig(level=logging.INFO) + + ymd = YangModuleDiscoverer('./yang') + ordered_module_names = ymd.run( + do_print_order=True, + do_log_order=True + ) + print('ordered_module_names', ordered_module_names) + + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__init__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
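The discoverer above orders modules with graphlib's TopologicalSorter so that imported modules are always loaded before the modules importing them. A small self-contained illustration of that ordering (module names taken from the YANG files added later in this patch; not part of the patch itself):

# Edges are "module -> modules it imports"; static_order() yields imports first.
from graphlib import TopologicalSorter

ts = TopologicalSorter()
ts.add('ietf-network-topology', 'ietf-network')
ts.add('ietf-eth-tran-service', 'ietf-network', 'ietf-network-topology')

print(list(ts.static_order()))
# e.g. ['ietf-network', 'ietf-network-topology', 'ietf-eth-tran-service']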
+ diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__main__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__main__.py new file mode 100644 index 000000000..2c84d92ef --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/__main__.py @@ -0,0 +1,26 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .app import app + +BIND_ADDRESS = '0.0.0.0' +BIND_PORT = 8080 + +if __name__ == '__main__': + # Only used to run it locally during development stage; + # otherwise, app is directly launched by gunicorn. + app.run( + host=BIND_ADDRESS, port=BIND_PORT, debug=True, use_reloader=False + ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py new file mode 100644 index 000000000..48ac8061e --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -0,0 +1,70 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
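app.py below wires the module discoverer, the YANG handler and the two Flask-RESTful resources together. A hedged sketch of how that wiring could be smoke-tested without Docker or gunicorn, using Flask's built-in test client; it assumes execution from src/tests/tools/mock_nce_t_ctrl so the relative ./yang and ./startup.json paths resolve, and is illustrative only:

# Illustrative sketch: smoke-testing the mock with Flask's test client.
from nce_t_ctrl.app import app

client = app.test_client()

resp = client.get('/.well-known/host-meta', headers={'Accept': 'application/json'})
assert resp.status_code == 200
print(resp.get_json())        # expected to advertise the '/restconf' root

resp = client.get('/restconf/data/ietf-network:networks')
print(resp.status_code, resp.get_data(as_text=True))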
+
+
+import json, logging, secrets
+from flask import Flask
+from flask_restful import Api
+from .Dispatch import RestConfDispatch
+from .HostMeta import HostMeta
+from .YangHandler import YangHandler
+from .YangModelDiscoverer import YangModuleDiscoverer
+
+
+logging.basicConfig(
+    level=logging.INFO,
+    format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s",
+)
+LOGGER = logging.getLogger(__name__)
+
+
+RESTCONF_PREFIX = '/restconf'
+SECRET_KEY = secrets.token_hex(64)
+
+
+YANG_SEARCH_PATH = './yang'
+
+ymd = YangModuleDiscoverer(YANG_SEARCH_PATH)
+YANG_MODULE_NAMES = ymd.run(do_log_order=True)
+
+STARTUP_FILE = './startup.json'
+with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp:
+    YANG_STARTUP_DATA = json.loads(fp.read())
+
+
+yang_handler = YangHandler(
+    YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA
+)
+restconf_paths = yang_handler.get_module_paths()
+
+app = Flask(__name__)
+app.config['SECRET_KEY'] = SECRET_KEY
+
+api = Api(app)
+api.add_resource(
+    HostMeta,
+    '/.well-known/host-meta',
+    resource_class_args=(RESTCONF_PREFIX,)
+)
+api.add_resource(
+    RestConfDispatch,
+    RESTCONF_PREFIX + '/data',
+    RESTCONF_PREFIX + '/data/',
+    RESTCONF_PREFIX + '/data/<path:subpath>',
+    resource_class_args=(yang_handler,)
+)
+
+LOGGER.info('Available RESTCONF paths:')
+for restconf_path in restconf_paths:
+    LOGGER.info('- {:s}'.format(str(restconf_path)))
diff --git a/src/tests/tools/mock_nce_t_ctrl/requirements.in b/src/tests/tools/mock_nce_t_ctrl/requirements.in
new file mode 100644
index 000000000..17155ed58
--- /dev/null
+++ b/src/tests/tools/mock_nce_t_ctrl/requirements.in
@@ -0,0 +1,25 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+cryptography==39.0.1
+eventlet==0.39.0
+Flask-HTTPAuth==4.5.0
+Flask-RESTful==0.3.9
+Flask==2.1.3
+gunicorn==23.0.0
+jsonschema==4.4.0
+libyang==2.8.4
+pyopenssl==23.0.0
+requests==2.27.1
+werkzeug==2.3.7
diff --git a/src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh b/src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh
new file mode 100755
index 000000000..593347cb8
--- /dev/null
+++ b/src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +export FLASK_ENV=development +gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG nce_t_ctrl.app:app diff --git a/src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh b/src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh new file mode 100755 index 000000000..9b47a3e21 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +python -m nce_t_ctrl diff --git a/src/tests/tools/mock_nce_t_ctrl/startup.json b/src/tests/tools/mock_nce_t_ctrl/startup.json new file mode 100644 index 000000000..c88f00378 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/startup.json @@ -0,0 +1,63 @@ +{ + "ietf-network:networks": { + "network": [ + { + "network-id": "admin", + "ietf-te-topology:te": { + "name": "admin" + }, + "network-types": { + "ietf-te-topology:te-topology": { + "ietf-otn-topology:otn-topology": {} + } + }, + "node": [ + { + "node-id": "O-PE1", "ietf-te-topology:te-node-id": "172.16.182.25", + "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-PE1", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254"} + ] + }, + { + "node-id": "O-P1", "ietf-te-topology:te-node-id": "172.16.185.31", + "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-P1", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}} + ] + }, + { + "node-id": "O-P2", "ietf-te-topology:te-node-id": "172.16.185.33", + "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-P2", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}} + ] + }, + { + "node-id": "O-PE2", "ietf-te-topology:te-node-id": "172.16.185.32", + "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-PE2", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254"} + ] + } + ], + "ietf-network-topology:link": [ + {"link-id": "L7ab", "source": {"source-node": "O-PE1", "source-tp": "501"}, 
"destination": {"dest-node": "O-P1", "dest-tp": "501"}}, + {"link-id": "L7ba", "source": {"source-node": "O-P1", "source-tp": "501"}, "destination": {"dest-node": "O-PE1", "dest-tp": "501"}}, + {"link-id": "L8ab", "source": {"source-node": "O-PE1", "source-tp": "500"}, "destination": {"dest-node": "O-P2", "dest-tp": "500"}}, + {"link-id": "L8ba", "source": {"source-node": "O-P2", "source-tp": "500"}, "destination": {"dest-node": "O-PE1", "dest-tp": "500"}}, + {"link-id": "L11ab", "source": {"source-node": "O-PE2", "source-tp": "500"}, "destination": {"dest-node": "O-P1", "dest-tp": "500"}}, + {"link-id": "L11ba", "source": {"source-node": "O-P1", "source-tp": "500"}, "destination": {"dest-node": "O-PE2", "dest-tp": "500"}}, + {"link-id": "L12ab", "source": {"source-node": "O-PE2", "source-tp": "501"}, "destination": {"dest-node": "O-P2", "dest-tp": "501"}}, + {"link-id": "L12ba", "source": {"source-node": "O-P2", "source-tp": "501"}, "destination": {"dest-node": "O-PE2", "dest-tp": "501"}} + ] + } + ] + } +} diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang new file mode 100644 index 000000000..633d74715 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang @@ -0,0 +1,1010 @@ +module ietf-eth-tran-service { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-eth-tran-service"; + + prefix "ethtsvc"; + import ietf-yang-types { + prefix "yang"; + reference "RFC 6991 - Common YANG Data Types"; + } + + import ietf-network { + prefix "nw"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-te-types { + prefix "te-types"; + reference "RFC 8776 - Traffic Engineering Common YANG Types"; + } + + import ietf-eth-tran-types { + prefix "etht-types"; + reference "RFC XXXX - A YANG Data Model for Transport + Network Client Signals"; + } + + import ietf-routing-types { + prefix "rt-types"; + reference "RFC 8294 - Common YANG Data Types for the + Routing Area"; + + } + + import ietf-te { + prefix "te"; + reference "RFC YYYY - A YANG Data Model for Traffic + Engineering Tunnels and Interfaces"; + } + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + WG List: + + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Italo Busi (italo.busi@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Anton Snitser (antons@sedonasys.com);0 + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (xufeng.liu.ietf@gmail.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu (yuchaode@huawei.com) + "; + + description + "This module defines a YANG data model for describing + the Ethernet services. The model fully conforms to the + Network Management Datastore Architecture (NMDA). + + Copyright (c) 2021 IETF Trust and the persons + identified as authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision 2023-10-23 { + description + "version -04 as an WG document"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + /* + * Groupings + */ + + grouping vlan-classification { + description + "A grouping which represents classification + on an 802.1Q VLAN tag."; + + leaf tag-type { + type etht-types:eth-tag-classify; + description + "The tag type used for VLAN classification."; + } + choice individual-bundling-vlan { + description + "VLAN based classification can be individual + or bundling."; + + case individual-vlan { + leaf vlan-value { + type etht-types:vlanid; + description + "VLAN ID value."; + } + } + + case vlan-bundling { + leaf vlan-range { + type etht-types:vid-range-type; + description + "List of VLAN ID values."; + } + } + } + } + + grouping vlan-write { + description + "A grouping which represents push/pop operations + of an 802.1Q VLAN tag."; + + leaf tag-type { + type etht-types:eth-tag-type; + description + "The VLAN tag type to push/swap."; + } + leaf vlan-value { + type etht-types:vlanid; + description + "The VLAN ID value to push/swap."; + } +/* + * To be added: this attribute is used when: + * a) the ETH service has only one CoS (as in current version) + * b) as a default when a mapping between a given CoS value + * and the PCP value is not defined (in future versions) + */ + leaf default-pcp { + type uint8 { + range "0..7"; + } + description + "The default Priority Code Point (PCP) value to push/swap"; + } + } + + grouping vlan-operations { + description + "A grouping which represents VLAN operations."; + + leaf pop-tags { + type uint8 { + range "1..2"; + } + description + "The number of VLAN tags to pop (or swap if used in + conjunction with push-tags)"; + } + container push-tags { + description + "The VLAN tags to push (or swap if used in + conjunction with pop-tags)"; + + container outer-tag { + presence + "Indicates existence of the outermost VLAN tag to + push/swap"; + + description + "The outermost VLAN tag to push/swap."; + + uses vlan-write; + } + container second-tag { + must + '../outer-tag/tag-type = "etht-types:s-vlan-tag-type" and ' + + 'tag-type = "etht-types:c-vlan-tag-type"' + { + + error-message + " + When pushing/swapping two tags, the outermost tag must + be specified and of S-VLAN type and the second + outermost tag must be of C-VLAN tag type. + "; + description + " + For IEEE 802.1Q interoperability, when pushing/swapping + two tags, it is required that the outermost tag exists + and is an S-VLAN, and the second outermost tag is a + C-VLAN. 
+ "; + } + + presence + "Indicates existence of a second outermost VLAN tag to + push/swap"; + + description + "The second outermost VLAN tag to push/swap."; + uses vlan-write; + } + } + } + + grouping named-or-value-bandwidth-profile { + description + "A grouping to configure a bandwdith profile either by + referencing a named bandwidth profile or by + configuring the values of the bandwidth profile attributes."; + choice style { + description + "Whether the bandwidth profile is named or defined by value"; + + case named { + description + "Named bandwidth profile."; + leaf bandwidth-profile-name { + type leafref { + path "/ethtsvc:etht-svc/ethtsvc:globals/" + + "ethtsvc:named-bandwidth-profiles/" + + "ethtsvc:bandwidth-profile-name"; + } + description + "Name of the bandwidth profile."; + } + } + case value { + description + "Bandwidth profile configured by value."; + uses etht-types:etht-bandwidth-profiles; + } + } + } + + grouping bandwidth-profiles { + description + "A grouping which represent bandwidth profile configuration."; + + choice direction { + description + "Whether the bandwidth profiles are symmetrical or + asymmetrical"; + case symmetrical { + description + "The same bandwidth profile is used to describe both + the ingress and the egress bandwidth profile."; + container ingress-egress-bandwidth-profile { + description + "The bandwdith profile used in both directions."; + uses named-or-value-bandwidth-profile; + } + } + case asymmetrical { + description + "Ingress and egress bandwidth profiles can be specified."; + container ingress-bandwidth-profile { + description + "The bandwdith profile used in the ingress direction."; + uses named-or-value-bandwidth-profile; + } + container egress-bandwidth-profile { + description + "The bandwdith profile used in the egress direction."; + uses named-or-value-bandwidth-profile; + } + } + } + } + + grouping etht-svc-access-parameters { + description + "ETH services access parameters"; + + leaf access-node-id { + type te-types:te-node-id; + description + "The identifier of the access node in + the ETH TE topology."; + } + + leaf access-node-uri { + type nw:node-id; + description + "The identifier of the access node in the network."; + } + + leaf access-ltp-id { + type te-types:te-tp-id; + description + "The TE link termination point identifier, used + together with access-node-id to identify the + access LTP."; + } + + leaf access-ltp-uri { + type nt:tp-id; + description + "The link termination point identifier in network topology, + used together with access-node-uri to identify the + access LTP."; + } + + leaf access-role { + type identityref { + base etht-types:access-role; + } + description + "Indicate the role of access, e.g., working or protection. "; + } + + container pm-config { + uses pm-config-grouping; + description + "This grouping is used to set the threshold value for + performance monitoring. "; + } + + container state { + config false; + description + "The state is used to monitor the status of service. "; + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + description + "Indicating the operational state of client signal. "; + } + leaf provisioning-state { + type identityref { + base te-types:lsp-state-type; + } + description + "Indicating the provisional state of client signal, + especially when there is a change, i.e., revise, create. 
"; + } + } + + leaf performance { + type identityref { + base etht-types:performance; + } + config false; + description + "Performance Monitoring for the service. "; + } + + } + + grouping etht-svc-tunnel-parameters { + description + "ETH services tunnel parameters."; + choice technology { + description + "Service multiplexing is optional and flexible."; + + case native-ethernet { + /* + placeholder to support proprietary multiplexing + (for further discussion) + */ + list eth-tunnels { + key name; + description + "ETH Tunnel list in native Ethernet scenario."; + uses tunnels-grouping; + } + } + + case frame-base { + list otn-tunnels { + key name; + description + "OTN Tunnel list in Frame-based scenario."; + uses tunnels-grouping; + } + } + + case mpls-tp { + container pw { + description + "Pseudowire information for Ethernet over MPLS-TP."; + uses pw-segment-grouping; + } + } + } + +/* + * Open issue: can we constraints it to be used only with mp services? + */ + leaf src-split-horizon-group { + type string; + description + "Identify a split horizon group at the Tunnel source TTP"; + } + leaf dst-split-horizon-group { + type string; + description + "Identify a split horizon group at the Tunnel destination TTP"; + } + } + + grouping etht-svc-pm-threshold-config { + description + "Configuraiton parameters for Ethernet service PM thresholds."; + + leaf sending-rate-high { + type uint64; + description + "High threshold of packet sending rate in kbps."; + } + leaf sending-rate-low { + type uint64; + description + "Low threshold of packet sending rate in kbps."; + } + leaf receiving-rate-high { + type uint64; + description + "High threshold of packet receiving rate in kbps."; + } + leaf receiving-rate-low { + type uint64; + description + "Low threshold of packet receiving rate in kbps."; + } + } + + grouping etht-svc-pm-stats { + description + "Ethernet service PM statistics."; + + leaf sending-rate-too-high { + type uint32; + description + "Counter that indicates the number of times the + sending rate is above the high threshold"; + } + leaf sending-rate-too-low { + type uint32; + description + "Counter that indicates the number of times the + sending rate is below the low threshold"; + } + leaf receiving-rate-too-high { + type uint32; + description + "Counter that indicates the number of times the + receiving rate is above the high threshold"; + } + leaf receiving-rate-too-low { + type uint32; + description + "Counter that indicates the number of times the + receiving rate is below the low threshold"; + } + } + + grouping etht-svc-instance-config { + description + "Configuraiton parameters for Ethernet services."; + + leaf etht-svc-name { + type string; + description + "Name of the ETH service."; + } + + leaf etht-svc-title { + type string; + description + "The Identifier of the ETH service."; + } + + leaf user-label { + type string; + description + "Alias of the ETH service."; + } + + leaf etht-svc-descr { + type string; + description + "Description of the ETH service."; + } + + leaf etht-svc-customer { + type string; + description + "Customer of the ETH service."; + } + + leaf etht-svc-type { + type etht-types:service-type; + description + "Type of ETH service (p2p, mp2mp or rmp)."; + /* Add default as p2p */ + } + + leaf etht-svc-lifecycle { + type etht-types:lifecycle-status; + description + "Lifecycle state of ETH service."; + /* Add default as installed */ + } + uses te-types:te-topology-identifier; + + uses resilience-grouping; + list etht-svc-end-points { + key 
etht-svc-end-point-name; + description + "The logical end point for the ETH service. "; + uses etht-svc-end-point-grouping; + } + + + container alarm-shreshold { + description "threshold configuration for the E2E client signal"; + uses alarm-shreshold-grouping; + } + + container underlay { + description + "The unterlay tunnel information that carrying the + ETH service. "; + uses etht-svc-tunnel-parameters; + } + + leaf admin-status { + type identityref { + base te-types:tunnel-admin-state-type; + } + default te-types:tunnel-admin-state-up; + description "ETH service administrative state."; + } + } + + grouping etht-svc-instance-state { + description + "State parameters for Ethernet services."; + + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + default te-types:tunnel-state-up; + description "ETH service operational state."; + } + leaf provisioning-state { + type identityref { + base te-types:lsp-state-type; + } + description "ETH service provisioning state."; + } + leaf creation-time { + type yang:date-and-time; + description + "Time of ETH service creation."; + } + leaf last-updated-time { + type yang:date-and-time; + description + "Time of ETH service last update."; + } + + leaf created-by { + type string; + description + "The client signal is created by whom, + can be a system or staff ID."; + } + leaf last-updated-by { + type string; + description + "The client signal is last updated by whom, + can be a system or staff ID."; + } + leaf owned-by { + type string; + description + "The client signal is last updated by whom, + can be a system ID."; + } + container pm-state { + description + "PM data of E2E Ethernet service"; + uses pm-state-grouping; + } + container error-info { + description "error messages of configuration"; + uses error-info-grouping; + } + } + + grouping pm-state-grouping { + leaf latency { + description + "latency value of the E2E Ethernet service"; + type uint32; + units microsecond; + } + } + + grouping error-info-grouping { + leaf error-code { + description "error code"; + type uint16; + } + + leaf error-description { + description "detail message of error"; + type string; + } + + leaf error-timestamp { + description "the date and time error is happened"; + type yang:date-and-time; + } + } + + grouping alarm-shreshold-grouping { + leaf latency-threshold { + description "a threshold for the E2E client signal service's + latency. Once the latency value exceed this threshold, an alarm + should be triggered."; + type uint32; + units microsecond; + } + } + + /* + * Data nodes + */ + + container etht-svc { + description + "ETH services."; + + container globals { + description + "Globals Ethernet configuration data container"; + list named-bandwidth-profiles { + key bandwidth-profile-name; + description + "List of named bandwidth profiles used by + Ethernet services."; + + leaf bandwidth-profile-name { + type string; + description + "Name of the bandwidth profile."; + } + uses etht-types:etht-bandwidth-profiles; + } + } + + list etht-svc-instances { + key etht-svc-name; + description + "The list of p2p ETH service instances"; + + uses etht-svc-instance-config; + + container state { + config false; + description + "Ethernet Service states."; + + uses etht-svc-instance-state; + } + } + } + + grouping resilience-grouping { + description + "Grouping for resilience configuration. 
"; + container resilience { + description + "To configure the data plane protection parameters, + currently a placeholder only, future candidate attributes + include, Revert, WTR, Hold-off Timer, ..."; + uses te:protection-restoration-properties; + } + } + + grouping etht-svc-end-point-grouping { + description + "Grouping for the end point configuration."; + leaf etht-svc-end-point-name { + type string; + description + "The name of the logical end point of ETH service. "; + } + + leaf etht-svc-end-point-id { + type string; + description + "The identifier of the logical end point of ETH service."; + } + + leaf etht-svc-end-point-descr { + type string; + description + "The description of the logical end point of ETH service. "; + } + + leaf topology-role { + type identityref { + base etht-types:topology-role; + } + description + "Indicating the underlay topology role, + e.g., hub,spoke, any-to-any "; + } + + container resilience { + description + "Placeholder for resilience configuration, for future study. "; + } + + list etht-svc-access-points { + key access-point-id; + min-elements "1"; +/* + Open Issue: + Is it possible to limit the max-elements only for p2p services? + max-elements "2"; +*/ + description + "List of the ETH trasport services access point instances."; + + leaf access-point-id { + type string; + description + "ID of the service access point instance"; + } + uses etht-svc-access-parameters; + } + + leaf service-classification-type { + type identityref { + base etht-types:service-classification-type; + } + description + "Service classification type."; + } + + choice service-classification { + description + "Access classification can be port-based or + VLAN based."; + + case port-classification { + /* no additional information */ + } + + case vlan-classification { + container outer-tag { + presence "The outermost VLAN tag exists"; + description + "Classifies traffic using the outermost VLAN tag."; + + uses vlan-classification; + } + container second-tag { + must + '../outer-tag/tag-type = "etht-types:classify-s-vlan" and ' + + 'tag-type = "etht-types:classify-c-vlan"' + { + error-message + " + When matching two tags, the outermost tag must be + specified and of S-VLAN type and the second + outermost tag must be of C-VLAN tag type. + "; + description + " + For IEEE 802.1Q interoperability, when matching two + tags, it is required that the outermost tag exists + and is an S-VLAN, and the second outermost tag is a + C-VLAN. + "; + } + presence "The second outermost VLAN tag exists"; + + description + "Classifies traffic using the second outermost VLAN tag."; + + uses vlan-classification; + } + } + } + +/* + * Open issue: can we constraints it to be used only with mp services? + */ + leaf split-horizon-group { + type string; + description "Identify a split horizon group"; + } + + uses bandwidth-profiles; + + container vlan-operations { + description + "Configuration of VLAN operations."; + choice direction { + description + "Whether the VLAN operations are symmetrical or + asymmetrical"; + case symmetrical { + container symmetrical-operation { + uses vlan-operations; + description + "Symmetrical operations. 
+ Expressed in the ingress direction, but + the reverse operation is applied to egress traffic"; + } + } + case asymmetrical { + container asymmetrical-operation { + description "Asymmetrical operations"; + container ingress { + uses vlan-operations; + description "Ingress operations"; + } + container egress { + uses vlan-operations; + description "Egress operations"; + } + } + } + } + } + } + + grouping pm-config-grouping { + description + "Grouping used for Performance Monitoring Configuration. "; + leaf pm-enable { + type boolean; + description + "Whether to enable the performance monitoring."; + } + + leaf sending-rate-high { + type uint64; + description + "The upperbound of sending rate."; + } + + leaf sending-rate-low { + type uint64; + description + "The lowerbound of sending rate."; + } + + leaf receiving-rate-high { + type uint64; + description + "The upperbound of receiving rate."; + } + + leaf receiving-rate-low { + type uint64; + description + "The lowerbound of receiving rate."; + } + } + + grouping pw-segment-grouping { + description + "Grouping used for PW configuration. "; + leaf pw-id { + type string; + description + "The Identifier information of pseudowire. "; + } + + leaf pw-name { + type string; + description + "The name information of pseudowire."; + } + + leaf transmit-label { + type rt-types:mpls-label; + description + "Transmit label information in PW. "; + } + + leaf receive-label { + type rt-types:mpls-label; + description + "Receive label information in PW. "; + } + + leaf encapsulation-type { + type identityref { + base etht-types:encapsulation-type; + } + description + "The encapsulation type, raw or tag. "; + } + + leaf oper-status { + type identityref { + base te-types:tunnel-state-type; + } + config false; + description + "The operational state of the PW segment. "; + } + + container ingress-bandwidth-profile { + description + "Bandwidth Profile for ingress. "; + uses pw-segment-named-or-value-bandwidth-profile; + } + + list pw-paths { + key path-id; + description + "A list of pw paths. "; + + leaf path-id { + type uint8; + description + "The identifier of pw paths. "; + + } + + list tp-tunnels { + key name; + description + "Names of TP Tunnel underlay"; + leaf name { + type string; + description + "Names of TP Tunnel underlay"; + } + } + } + + } + + grouping pw-segment-named-or-value-bandwidth-profile { + description + "A grouping to configure a bandwdith profile either by + referencing a named bandwidth profile or by + configuring the values of the bandwidth profile attributes."; + choice style { + description + "Whether the bandwidth profile is named or defined by value"; + case named { + description + "Named bandwidth profile."; + leaf bandwidth-profile-name { + type leafref { + path "/ethtsvc:etht-svc/ethtsvc:globals/" + + "ethtsvc:named-bandwidth-profiles/" + + "ethtsvc:bandwidth-profile-name"; + } + description + "Name of the bandwidth profile."; + } + } + case value { + description + "Bandwidth profile configured by value."; + uses etht-types:pw-segement-bandwidth-profile-grouping; + } + } + } + + grouping tunnels-grouping { + description + "A group of tunnels. 
"; + leaf name { + type leafref { + path "/te:te/te:tunnels/te:tunnel/te:name"; + require-instance false; + } + description "Dependency tunnel name"; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description "LSP encoding type"; + reference "RFC3945"; + } + leaf switching-type { + type identityref { + base te-types:switching-capabilities; + } + description "LSP switching type"; + reference "RFC3945"; + } + } +} diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang new file mode 100644 index 000000000..3d152c058 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang @@ -0,0 +1,460 @@ +module ietf-eth-tran-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-eth-tran-types"; + + prefix "etht-types"; + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + WG List: + + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Italo Busi (italo.busi@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Anton Snitser (antons@sedonasys.com); + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (xufeng.liu.ietf@gmail.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu (yuchaode@huawei.com) + "; + + description + "This module defines the ETH types. + The model fully conforms to the Network Management + Datastore Architecture (NMDA). + + Copyright (c) 2019 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision 2023-10-23 { + description + "version -05 as a WG draft"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + /* + * Identities + */ + + identity eth-vlan-tag-type { + description + "ETH VLAN tag type."; + } + + identity c-vlan-tag-type { + base eth-vlan-tag-type; + description + "802.1Q Customer VLAN"; + } + + identity s-vlan-tag-type { + base eth-vlan-tag-type; + description + "802.1Q Service VLAN (QinQ)"; + } + + identity service-classification-type { + description + "Service classification."; + } + + identity port-classification { + base service-classification-type; + description + "Port classification."; + } + + identity vlan-classification { + base service-classification-type; + description + "VLAN classification."; + } + + identity eth-vlan-tag-classify { + description + "VLAN tag classification."; + } + + identity classify-c-vlan { + base eth-vlan-tag-classify; + description + "Classify 802.1Q Customer VLAN tag. + Only C-tag type is accepted"; + } + + identity classify-s-vlan { + base eth-vlan-tag-classify; + description + "Classify 802.1Q Service VLAN (QinQ) tag. + Only S-tag type is accepted"; + } + + identity classify-s-or-c-vlan { + base eth-vlan-tag-classify; + description + "Classify S-VLAN or C-VLAN tag-classify. 
+ Either tag is accepted"; + } + + identity bandwidth-profile-type { + description + "Bandwidth Profile Types"; + } + + identity mef-10-bwp { + base bandwidth-profile-type; + description + "MEF 10 Bandwidth Profile"; + } + + identity rfc-2697-bwp { + base bandwidth-profile-type; + description + "RFC 2697 Bandwidth Profile"; + } + + identity rfc-2698-bwp { + base bandwidth-profile-type; + description + "RFC 2698 Bandwidth Profile"; + } + + identity rfc-4115-bwp { + base bandwidth-profile-type; + description + "RFC 4115 Bandwidth Profile"; + } + + identity service-type { + description + "Type of Ethernet service."; + } + + identity p2p-svc { + base service-type; + description + "Ethernet point-to-point service (EPL, EVPL)."; + } + + identity rmp-svc { + base service-type; + description + "Ethernet rooted-multitpoint service (E-TREE, EP-TREE)."; + } + + identity mp2mp-svc { + base service-type; + description + "Ethernet multipoint-to-multitpoint service (E-LAN, EP-LAN)."; + } + + identity lifecycle-status { + description + "Lifecycle Status."; + } + + identity installed { + base lifecycle-status; + description + "Installed."; + } + + identity planned { + base lifecycle-status; + description + "Planned."; + } + + identity pending-removal { + base lifecycle-status; + description + "Pending Removal."; + } + + /* + * Type Definitions + */ + + typedef eth-tag-type { + type identityref { + base eth-vlan-tag-type; + } + description + "Identifies a specific ETH VLAN tag type."; + } + + typedef eth-tag-classify { + type identityref { + base eth-vlan-tag-classify; + } + description + "Identifies a specific VLAN tag classification."; + } + + typedef vlanid { + type uint16 { + range "1..4094"; + } + description + "The 12-bit VLAN-ID used in the VLAN Tag header."; + } + + typedef vid-range-type { + type string { + pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" + + "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)"; + } + description + "A list of VLAN Ids, or non overlapping VLAN ranges, in + ascending order, between 1 and 4094. + This type is used to match an ordered list of VLAN Ids, or + contiguous ranges of VLAN Ids. Valid VLAN Ids must be in the + range 1 to 4094, and included in the list in non overlapping + ascending order. 
+ + For example: 1,10-100,50,500-1000"; + } + + typedef bandwidth-profile-type { + type identityref { + base bandwidth-profile-type; + } + description + "Identifies a specific Bandwidth Profile type."; + } + + typedef service-type { + type identityref { + base service-type; + } + description + "Identifies the type of Ethernet service."; + } + + typedef lifecycle-status { + type identityref { + base lifecycle-status; + } + description + "Identifies the lLifecycle Status ."; + } + + /* + * Grouping Definitions + */ + + grouping etht-bandwidth-profiles { + description + "Bandwidth profile configuration paramters."; + + leaf bandwidth-profile-type { + type etht-types:bandwidth-profile-type; + description + "The type of bandwidth profile."; + } + leaf CIR { + type uint64; + description + "Committed Information Rate in Kbps"; + } + leaf CBS { + type uint64; + description + "Committed Burst Size in in KBytes"; + } + leaf EIR { + type uint64; + /* Need to indicate that EIR is not supported by RFC 2697 + + must + '../bw-profile-type = "mef-10-bwp" or ' + + '../bw-profile-type = "rfc-2698-bwp" or ' + + '../bw-profile-type = "rfc-4115-bwp"' + + must + '../bw-profile-type != "rfc-2697-bwp"' + */ + description + "Excess Information Rate in Kbps + In case of RFC 2698, PIR = CIR + EIR"; + } + leaf EBS { + type uint64; + description + "Excess Burst Size in KBytes. + In case of RFC 2698, PBS = CBS + EBS"; + } + leaf color-aware { + type boolean; + description + "Indicates weather the color-mode is + color-aware or color-blind."; + } + leaf coupling-flag { + type boolean; + /* Need to indicate that Coupling Flag is defined only for MEF 10 + + must + '../bw-profile-type = "mef-10-bwp"' + */ + description + "Coupling Flag."; + } + } + + identity topology-role { + description + "The role of underlay topology: e.g., hub, spoke, + any-to-any."; + } + + identity resilience { + description + "Placeholder for resilience information in data plane, + for future study. "; + } + + identity access-role { + description + "Indicating whether the access is a working or protection access."; + } + + identity root-primary { + base access-role; + description + "Designates the primary root UNI of an E-Tree service, and may also + designates the UNI access role of E-LINE and E-LAN service."; + } + + identity root-backup { + base access-role; + description + "Designates the backup root UNI of an E-Tree service."; + } + + identity leaf-access { + base access-role; + description + "Designates the leaf UNI of an E-Tree service."; + } + + identity leaf-edge { + base access-role; + description ""; + } + + identity performance { + description + "Placeholder for performance information, for future study."; + } + + identity encapsulation-type { + description + "Indicating how the service is encapsulated (to PW), e.g, raw or tag. "; + } + grouping pw-segement-bandwidth-profile-grouping { + description + "bandwidth profile grouping for PW segment. 
"; + leaf bandwidth-profile-type { + type etht-types:bandwidth-profile-type; + description + "The type of bandwidth profile."; + } + leaf CIR { + type uint64; + description + "Committed Information Rate in Kbps"; + } + leaf CBS { + type uint64; + description + "Committed Burst Size in in KBytes"; + } + leaf EIR { + type uint64; + /* Need to indicate that EIR is not supported by RFC 2697 + + must + '../bw-profile-type = "mef-10-bwp" or ' + + '../bw-profile-type = "rfc-2698-bwp" or ' + + '../bw-profile-type = "rfc-4115-bwp"' + + must + '../bw-profile-type != "rfc-2697-bwp"' + */ + description + "Excess Information Rate in Kbps + In case of RFC 2698, PIR = CIR + EIR"; + } + leaf EBS { + type uint64; + description + "Excess Burst Size in KBytes. + In case of RFC 2698, PBS = CBS + EBS"; + } + } + grouping eth-bandwidth { + description + "Available bandwith for ethernet."; + leaf eth-bandwidth { + type uint64{ + range "0..10000000000"; + } + units "Kbps"; + description + "Available bandwith value expressed in kilobits per second"; + } + } + + grouping eth-label-restriction { + description + "Label Restriction for ethernet."; + leaf tag-type { + type etht-types:eth-tag-type; + description "VLAN tag type."; + } + leaf priority { + type uint8; + description "priority."; + } + } + grouping eth-label { + description + "Label for ethernet."; + leaf vlanid { + type etht-types:vlanid; + description + "VLAN tag id."; + } + } + + grouping eth-label-step { + description "Label step for Ethernet VLAN"; + leaf eth-step { + type uint16 { + range "1..4095"; + } + default 1; + description + "Label step which represent possible increments for + an Ethernet VLAN tag."; + reference + "IEEE 802.1ad: Provider Bridges."; + } + } +} diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang new file mode 100644 index 000000000..f84cae94c --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang @@ -0,0 +1,325 @@ +module ietf-trans-client-service { + /* TODO: FIXME */ + yang-version 1.1; + + namespace "urn:ietf:params:xml:ns:yang:ietf-trans-client-service"; + prefix "clntsvc"; + + import ietf-network { + prefix "nw"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-te-types { + prefix "te-types"; + reference "RFC 8776 - Traffic Engineering Common YANG Types"; + } + + import ietf-layer1-types { + prefix "layer1-types"; + reference "RFC ZZZZ - A YANG Data Model for Layer 1 Types"; + } + + import ietf-yang-types { + prefix "yang"; + reference "RFC 6991 - Common YANG Data Types"; + } + + import ietf-trans-client-svc-types { + prefix "clntsvc-types"; + reference "RFC XXXX - A YANG Data Model for + Transport Network Client Signals"; + } + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Italo Busi (italo.busi@huawei.com); + Anton Snitser (antons@sedonasys.com); + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (Xufeng_Liu@jabil.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu 
(yuchaode@huawei.com); + "; + + description + "This module defines a YANG data model for describing + transport network client services. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2021 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + revision 2023-10-23 { + description + "version -04 as a WG document"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + /* + * Groupings + */ + grouping client-svc-access-parameters { + description + "Transport network client signals access parameters"; + + leaf access-node-id { + type te-types:te-node-id; + description + "The identifier of the access node in the TE topology."; + } + + leaf access-node-uri { + type nw:node-id; + description + "The identifier of the access node in the network."; + } + + leaf access-ltp-id { + type te-types:te-tp-id; + description + "The TE link termination point identifier in TE topology, used + together with access-node-id to identify the access LTP."; + } + + leaf access-ltp-uri { + type nt:tp-id; + description + "The link termination point identifier in network topology, + used together with access-node-uri to identify the access LTP"; + } + + leaf client-signal { + type identityref { + base layer1-types:client-signal; + } + description + "Identify the client signal type associated with this port"; + } + + } + + grouping pm-state-grouping { + leaf latency { + description "latency value of the E2E client signal service"; + type uint32; + units microsecond; + } + } + + grouping error-info-grouping { + leaf error-code { + description "error code"; + type uint16; + } + + leaf error-description { + description "detail message of error"; + type string; + } + + leaf error-timestamp { + description "the date and time error is happened"; + type yang:date-and-time; + } + } + + grouping alarm-shreshold-grouping { + leaf latency-threshold { + description "a threshold for the E2E client signal service's + latency. 
Once the latency value exceed this threshold, an alarm + should be triggered."; + type uint32; + units microsecond; + } + } + + grouping client-svc-tunnel-parameters { + description + "Transport network client signals tunnel parameters"; + + leaf tunnel-name { + type string; + description + "TE tunnel instance name."; + } + } + + grouping client-svc-instance-config { + description + "Configuration parameters for client services."; + leaf client-svc-name { + type string; + description + "Identifier of the p2p transport network client signals."; + } + + leaf client-svc-title { + type string; + description + "Name of the p2p transport network client signals."; + } + + leaf user-label { + type string; + description + "Alias of the p2p transport network client signals."; + } + + leaf client-svc-descr { + type string; + description + "Description of the transport network client signals."; + } + + leaf client-svc-customer { + type string; + description + "Customer of the transport network client signals."; + } + + container resilience { + description "Place holder for resilience functionalities"; + } + + uses te-types:te-topology-identifier; + + leaf admin-status { + type identityref { + base te-types:tunnel-admin-state-type; + } + default te-types:tunnel-admin-state-up; + description "Client signals administrative state."; + } + + container src-access-ports { + description + "Source access port of a client signal."; + uses client-svc-access-parameters; + } + container dst-access-ports { + description + "Destination access port of a client signal."; + uses client-svc-access-parameters; + } + + container pm-state { + config false; + description "PM data of E2E client signal"; + uses pm-state-grouping; + } + + container error-info { + config false; + description "error messages of configuration"; + uses error-info-grouping; + } + + container alarm-shreshold { + description "threshold configuration for the E2E client signal"; + uses alarm-shreshold-grouping; + } + + leaf direction { + type identityref { + base clntsvc-types:direction; + } + description "Uni-dir or Bi-dir for the client signal."; + } + + list svc-tunnels { + key tunnel-name; + description + "List of the TE Tunnels supporting the client signal."; + uses client-svc-tunnel-parameters; + } + } + + grouping client-svc-instance-state { + description + "State parameters for client services."; + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + config false; + description "Client signal operational state."; + } + leaf provisioning-state { + type identityref { + base te-types:lsp-state-type; + } + config false; + description "Client signal provisioning state."; + } + leaf creation-time { + type yang:date-and-time; + config false; + description "The time of the client signal be created."; + } + leaf last-updated-time { + type yang:date-and-time; + config false; + description "The time of the client signal's latest update."; + } + leaf created-by { + type string; + config false; + description + "The client signal is created by whom, + can be a system or staff ID."; + } + leaf last-updated-by { + type string; + config false; + description + "The client signal is last updated by whom, + can be a system or staff ID."; + } + leaf owned-by { + type string; + config false; + description + "The client signal is owned by whom, + can be a system ID."; + } + } + + /* + * Data nodes + */ + + container client-svc { + description + "Transport client services."; + + list client-svc-instances { + key client-svc-name; + 
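[Editor's note] The pm-state and alarm-shreshold groupings used by this service model carry the measured end-to-end latency and the latency threshold (both uint32 microseconds), and the description states that an alarm should be raised once the measured value exceeds the threshold. The hedged Python sketch below illustrates that rule; the dictionary layout and the shape of the returned alarm record (loosely modelled on error-info) are assumptions, not defined by the module.

    from datetime import datetime, timezone
    from typing import Optional

    # Illustrative latency-threshold check for the pm-state / alarm-shreshold
    # groupings above. Both values are uint32 microseconds per the module.

    def latency_alarm(pm_state: dict, alarm_threshold: dict) -> Optional[dict]:
        """Return an alarm record when pm-state latency exceeds latency-threshold."""
        latency = pm_state.get("latency")
        threshold = alarm_threshold.get("latency-threshold")
        if latency is None or threshold is None or latency <= threshold:
            return None
        return {
            # Shaped loosely after the error-info grouping; error-code is arbitrary.
            "error-code": 1,
            "error-description": f"latency {latency} us exceeds threshold {threshold} us",
            "error-timestamp": datetime.now(timezone.utc).isoformat(),
        }

    print(latency_alarm({"latency": 1500}, {"latency-threshold": 1000}))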
description + "The list of p2p transport client service instances"; + + uses client-svc-instance-config; + uses client-svc-instance-state; + } + } +} diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang new file mode 100644 index 000000000..925511735 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang @@ -0,0 +1,63 @@ +module ietf-trans-client-svc-types { + namespace "urn:ietf:params:xml:ns:yang:ietf-trans-client-svc-types"; + prefix "clntsvc-types"; + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Italo Busi (italo.busi@huawei.com); + Anton Snitser (antons@sedonasys.com); + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (Xufeng_Liu@jabil.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu (yuchaode@huawei.com); + "; + + description + "This module defines a YANG data model for describing + transport network client types. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2019 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision 2023-10-23 { + description + "version -01 as a WG document"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + identity direction { + description + "Direction information of Client Signal."; + } + + identity bidirectional { + base direction; + description + "Client Signal is bi-directional."; + } + + identity unidirectional { + base direction; + description + "Client Signal is uni-directional."; + } + +} diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang new file mode 100644 index 000000000..a04eb213d --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang @@ -0,0 +1,2278 @@ +module ietf-eth-te-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-eth-te-topology"; + prefix "etht"; + + import ietf-network { + prefix "nw"; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-te-topology { + prefix "tet"; + reference + "RFC 8795: YANG Data Model for Traffic Engineering + (TE) Topologies"; + } + + import ietf-yang-types { + prefix "yang"; + reference + "RFC 6991: Common YANG Data Types"; + } + + import ietf-eth-tran-types { + prefix "etht-types"; + reference + "RFC YYYY: A YANG Data Model for Transport Network Client + Signals"; + } + // RFC Ed.: replace YYYY with actual RFC number, update date + // information and remove this note + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: + WG List: + + Editor: Haomian Zheng + + + Editor: Italo Busi + + + Editor: Aihua Guo + + + Editor: Yunbin Xu + + + Editor: Yang Zhao + + + Editor: Xufeng Liu + "; + + description + "This module defines a YANG data model for describing + layer-2 Ethernet transport topologies. The model fully + conforms to the Network Management Datastore + Architecture (NMDA). + + Copyright (c) 2023 IETF Trust and the persons identified + as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. 
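[Editor's note] Since these draft modules are vendored under the mock controller's yang/ directory, a quick sanity check is to run them through pyang and print their tree representation. The sketch below is a convenience script, not part of the patch: it assumes pyang is installed on PATH, that the directory layout matches this patch, and that the imported RFC modules (ietf-network, ietf-te-types, ietf-layer1-types, ...) are also reachable on the module search path.

    import subprocess
    from pathlib import Path

    # Hedged helper: run pyang over the vendored draft modules to confirm they
    # parse and to print their tree output. Assumes pyang is on PATH and the
    # imported standard modules are available on the search path as well.
    YANG_DIRS = [
        Path("src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10"),
        Path("src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09"),
    ]

    def pyang_tree(module: Path, search_dirs) -> str:
        cmd = ["pyang", "-f", "tree"]
        for directory in search_dirs:
            cmd += ["-p", str(directory)]
        cmd.append(str(module))
        return subprocess.run(cmd, check=True, capture_output=True, text=True).stdout

    if __name__ == "__main__":
        for directory in YANG_DIRS:
            for module in sorted(directory.glob("*.yang")):
                print(f"== {module.name} ==")
                print(pyang_tree(module, YANG_DIRS))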
+ + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here."; + + revision 2023-09-28 { + description + "Initial Revision"; + reference + "RFC XXXX: A YANG Data Model for Ethernet TE Topology"; + // RFC Ed.: replace XXXX with actual RFC number, update date + // information and remove this note + } + + /* + * Groupings + */ + + grouping label-range-info { + description + "Ethernet technology-specific label range related + information with a presence container indicating that the + label range is an Ethernet technology-specific label range. + + This grouping SHOULD be used together with the + eth-label and eth-label-step groupings to provide Ethernet + technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + + container ethernet-label-range { + presence + "Indicates the label range is an Ethernet label range. + + This container must not be present if there are other + presence containers or attributes indicating another type + of label range."; + description + "Ethernet-specific label range related information."; + + uses etht-types:eth-label-restriction; + } + } + + grouping eth-tran-topology-type { + description + "Identifies the Ethernet Transport topology type"; + + container eth-tran-topology { + presence "indicates a topology type of + Ethernet Transport Network."; + description "Eth transport topology type"; + } + } + + grouping ltp-bandwidth-profiles { + description + "A grouping which represents the bandwidth profile(s) + for the ETH LTP."; + + choice direction { + description + "Whether the bandwidth profiles are symmetrical or + asymmetrical"; + case symmetrical { + description + "The same bandwidth profile is used to describe the ingress + and the egress bandwidth profile."; + + container ingress-egress-bandwidth-profile { + description + "The bandwith profile used in the ingress and egress + direction."; + uses etht-types:etht-bandwidth-profiles; + } + } + case asymmetrical { + description + "Different ingress and egress bandwidth profiles + can be specified."; + container ingress-bandwidth-profile { + description + "The bandwidth profile used in the ingress direction."; + uses etht-types:etht-bandwidth-profiles; + } + container egress-bandwidth-profile { + description + "The bandwidth profile used in the egress direction."; + uses etht-types:etht-bandwidth-profiles; + } + } + } + } + + grouping eth-ltp-attributes { + description + "Ethernet transport Link Termination Point (LTP) attributes"; + + leaf ltp-mac-address { + type yang:mac-address; + description + "The MAC address of the Ethernet LTP."; + } + leaf port-vlan-id { + type etht-types:vlanid; + description + "The Port VLAN ID of the Ethernet LTP."; + reference + "IEEE 802.1Q: Virtual Bridged Local Area Networks"; + } + leaf maximum-frame-size { + type uint16 { + range "64 .. 
65535"; + } + description + "Maximum frame size"; + reference + "IEEE 802.1Q: Virtual Bridged Local Area Networks"; + } + uses ltp-bandwidth-profiles; + } + + grouping svc-vlan-classification { + description + "Grouping defining the capabilities for VLAN classification."; + + leaf-list supported-tag-types { + type etht-types:eth-tag-classify; + description + "List of VLAN tag types that can be used for the VLAN + classification. In case VLAN classification is not + supported, the list is empty."; + } + leaf vlan-bundling { + type boolean; + description + "In case VLAN classification is supported, indicates whether + VLAN bundling classification is also supported."; + reference + "MEF 10.3: Ethernet Services Attributes Phase 3"; + } + leaf vlan-range { + type etht-types:vid-range-type; + description + "In case VLAN classification is supported, indicates the + of available VLAN ID values."; + } + } + + grouping svc-vlan-push { + description + "Grouping defining the capabilities for VLAN push or swap + operations."; + + leaf-list supported-tag-types { + type etht-types:eth-tag-type; + description + "List of VLAN tag types that can be used to push or swap a + VLAN tag. In case VLAN push/swap is not supported, the list + is empty."; + reference + "IEEE 802.1Q: Virtual Bridged Local Area Networks"; + } + leaf vlan-range { + type etht-types:vid-range-type; + description + "In case VLAN push/swap operation is supported, the range + of available VLAN ID values."; + } + } + + grouping eth-svc-attributes { + description + "Ethernet Link Termination Point (LTP) service attributes."; + + container supported-classification { + description + "Service classification capability supported by the + Ethernet Link Termination Point (LTP)."; + + leaf port-classification { + type boolean; + description + "Indicates that the ETH LTP support port-based service + classification."; + } + container vlan-classification { + description + "Service classification capabilities based on the VLAN + tag(s) supported by the ETH LTP."; + + leaf vlan-tag-classification { + type boolean; + description + "Indicates that the ETH LTP supports VLAN service + classification."; + } + container outer-tag { + description + "Service classification capabilities based on the outer + VLAN tag, supported by the ETH LTP."; + uses svc-vlan-classification; + } + container second-tag { + description + "Service classification capabilities based on the second + VLAN tag, supported by the ETH LTP."; + leaf second-tag-classification { + type boolean; + must ". 
= 'false' or " + + "../../vlan-tag-classification = 'true'" { + description + "VLAN service classification based on the second + VLAN tag can be supported only when VLAN service + classification"; + } + description + "Indicates that the ETH LTP support VLAN service + classification based on the second VLAN tag."; + } + uses svc-vlan-classification; + } + } + } + + container supported-vlan-operations { + description + "Reports the VLAN operations supported by the ETH LTP."; + + leaf asymmetrical-operations { + type boolean; + description + "Indicates whether the ETH LTP supports also asymmetrical + VLAN operations.It is assumed that symmetrical VLAN + operations are alwyas supported."; + } + leaf transparent-vlan-operations { + type boolean; + description + "Indicates that the ETH LTP supports transparent + operations."; + } + container vlan-pop { + description + "Indicates VLAN pop or swap operations capabilities."; + + leaf vlan-pop-operations { + type boolean; + description + "Indicates that the ETH LTP supports VLAN pop or + swap operations."; + } + leaf max-pop-tags { + type uint8 { + range "1..2"; + } + description + "Indicates the maximum number of tags that can be + popped/swapped."; + } + } + container vlan-push { + description + "Indicates VLAN push or swap operations capabilities."; + + leaf vlan-push-operation { + type boolean; + description + "Indicates that the ETH LTP supports VLAN push or + swap operations."; + } + container outer-tag { + description + "Indicates the supported VLAN operation capabilities + on the outer VLAN tag."; + uses svc-vlan-push; + } + container second-tag { + description + "Indicates the supported VLAN operation capabilities + on the second VLAN tag."; + leaf push-second-tag { + type boolean; + description + "Indicates that the ETH LTP supports VLAN push or swap + operations for the second VLAN tag."; + } + uses svc-vlan-push; + } + } + } + } + + /* + * Data nodes + */ + + augment "/nw:networks/nw:network/nw:network-types/" + + "tet:te-topology" { + description + "Augment network types to include ETH transport newtork"; + + uses eth-tran-topology-type; + } + + augment "/nw:networks/nw:network/nw:node/tet:te" + + "/tet:te-node-attributes" { + when "../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description "Augment only for Ethernet transport network."; + } + description "Augment TE node attributes."; + container eth-node { + presence "The TE node is an Ethernet node."; + description + "Presence container used only to indicate that the TE node + is an Ethernet node."; + } + } + + augment "/nw:networks/nw:network/nt:link" { + when "../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description "Augment only for Ethernet transport network."; + } + description "Augment link configuration"; + + container eth-svc { + presence + "When present, indicates that the Link supports Ethernet + client signals."; + description + "Presence container used only to indicate that the link + supports Ethernet client signals."; + } + } + + augment "/nw:networks/nw:network/nw:node/nt:termination-point" { + when "../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description "Augment only for Ethernet transport network."; + } + description + "Augment ETH LTP attributes"; + + container eth-svc { + presence + "When present, indicates that the Link Termination Point + (LTP) supports Ethernet client signals."; + description + "ETH LTP Service attributes."; + + uses eth-svc-attributes; + } + container 
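[Editor's note] The augmentations above mark Ethernet transport topologies purely through presence containers: etht:eth-tran-topology under the network types, eth-node under the TE node attributes, and eth-svc on links and termination points. The hedged Python sketch below shows how a client of the mock controller might filter an RFC 8345 style JSON instance on the topology-type marker; the RFC 7951 member qualification and the shape of the input document are assumptions for illustration.

    # Illustrative filter over an RFC 8345 / RFC 7951 style instance document,
    # keyed on the presence containers added by the augmentations above.
    # Member qualification and the input shape are assumptions for this sketch.

    ETH_TOPO_MARKER = "ietf-eth-te-topology:eth-tran-topology"

    def ethernet_networks(networks_doc: dict) -> list:
        """Return the networks whose network-types advertise an Ethernet TE topology."""
        result = []
        for network in networks_doc.get("ietf-network:networks", {}).get("network", []):
            te_topology = network.get("network-types", {}) \
                                 .get("ietf-te-topology:te-topology", {})
            if ETH_TOPO_MARKER in te_topology:   # presence container => Ethernet topology
                result.append(network)
        return result

    example = {
        "ietf-network:networks": {
            "network": [
                {"network-id": "eth-topo",
                 "network-types": {"ietf-te-topology:te-topology": {ETH_TOPO_MARKER: {}}}},
                {"network-id": "other-topo",
                 "network-types": {}},
            ]
        }
    }
    print([n["network-id"] for n in ethernet_networks(example)])   # ['eth-topo']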
eth-link-tp { + description + "Attributes of the Ethernet Link Termination Point (LTP)."; + uses eth-ltp-attributes; + } + } + + /* + * Augment TE bandwidth + */ + + augment "/nw:networks/nw:network/nw:node/nt:termination-point/" + + "tet:te/" + + "tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum LSP TE bandwidth for the link termination + point (LTP)."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices information source."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry information source"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:client-layer-adaptation/tet:switching-capability/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment client TE bandwidth of the tunnel termination point + (TTP)"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/tet:path-constraints/" + + 
"tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/tet:path-constraints/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum TE bandwidth for the TE link"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment unreserved TE bandwidth for the TE Link"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link + information source"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + 
description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum TE bandwidth for the TE link + information source"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link + information-source"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment unreserved TE bandwidth of the TE link + information source"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + description + "Augment maximum LSP TE bandwidth of the TE link + template"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum TE bandwidth the TE link template"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum reservable TE bandwidth for the TE link + template."; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment unreserved TE bandwidth the TE link template"; + uses etht-types:eth-bandwidth; + } + + /* + * Augment TE label range information + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + 
"tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TTP + Local Link Connectivities."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TTP + Local Link Connectivity entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE link."; + uses label-range-info; + } + + augment 
"/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE link + information source."; + uses label-range-info; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + description + "Augment TE label range information for the TE link template."; + uses label-range-info; + } + + /* + * Augment TE label. + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE node + connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE node + connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-step/" + + "tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE node + connectivity matrices"; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:underlay/tet:primary-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path of the + TE node connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path of the + TE node connectivity 
matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity + matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity + matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + 
"tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + 
description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE node connectivity + matrices information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE node connectivity + matrices information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment 
"/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE node connectivity + matrices information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE node connectivity matrices of the information + source entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE node connectivity matrices of the information + source entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity matrices + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity matrices + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + 
"tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for 
networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet 
topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology"{ + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology"{ + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when 
"../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + 
"Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet 
topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE link."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + 
"tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE link + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE link + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE link + information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay primary path + of the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay backup path + of the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + description + "Augment TE label range start for the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + description + "Augment TE label range end for the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + description + "Augment TE label range step for the TE link template."; + case eth { + uses etht-types:eth-label-step; + } + } + +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang new file mode 100644 index 000000000..15e7ac508 --- /dev/null +++ 
b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang
@@ -0,0 +1,2230 @@
+module ietf-otn-topology {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-otn-topology";
+  prefix "otnt";
+
+  import ietf-network {
+    prefix "nw";
+    reference
+      "RFC 8345: A YANG Data Model for Network Topologies";
+  }
+
+  import ietf-network-topology {
+    prefix "nt";
+    reference
+      "RFC 8345: A YANG Data Model for Network Topologies";
+  }
+
+  import ietf-te-topology {
+    prefix "tet";
+    reference
+      "RFC 8795: YANG Data Model for Traffic Engineering
+       (TE) Topologies";
+  }
+
+  import ietf-layer1-types {
+    prefix "l1-types";
+    reference
+      "RFC YYYY: A YANG Data Model for Layer 1 Types";
+  }
+  // RFC Editor: replace YYYY with actual RFC number assigned to
+  // [I-D.ietf-ccamp-layer1-types] and remove this note
+
+  organization
+    "IETF CCAMP Working Group";
+  contact
+    "WG Web:
+     WG List:
+
+     Editor: Haomian Zheng
+
+
+     Editor: Italo Busi
+
+
+     Editor: Xufeng Liu
+
+
+     Editor: Sergio Belotti
+
+
+     Editor: Oscar Gonzalez de Dios
+     ";
+
+  description
+    "This module defines a protocol independent Layer 1/ODU topology
+     data model. The model fully conforms
+     to the Network Management Datastore Architecture (NMDA).
+
+     Copyright (c) 2024 IETF Trust and the persons identified
+     as authors of the code. All rights reserved.
+
+     Redistribution and use in source and binary forms, with or
+     without modification, is permitted pursuant to, and subject
+     to the license terms contained in, the Revised BSD License
+     set forth in Section 4.c of the IETF Trust's Legal Provisions
+     Relating to IETF Documents
+     (https://trustee.ietf.org/license-info).
+
+     This version of this YANG module is part of RFC XXXX; see
+     the RFC itself for full legal notices.
+
+     The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL
+     NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED',
+     'MAY', and 'OPTIONAL' in this document are to be interpreted as
+     described in BCP 14 (RFC 2119) (RFC 8174) when, and only when,
+     they appear in all capitals, as shown here.";
+
+  revision 2024-06-21 {
+    description
+      "Initial Revision";
+    reference
+      "RFC XXXX: A YANG Data Model for Optical Transport Network
+       Topology";
+  }
+  // RFC Editor: replace XXXX with actual RFC number, update date
+  // information and remove this note
+
+  /*
+   * Groupings
+   */
+
+  grouping label-range-info {
+    description
+      "OTN technology-specific label range related information with
+       a presence container indicating that the label range is an
+       OTN technology-specific label range.
+
+       This grouping SHOULD be used together with the
+       otn-label-start-end and otn-label-step groupings to provide
+       OTN technology-specific label information to the models which
+       use the label-restriction-info grouping defined in the module
+       ietf-te-types.";
+    uses l1-types:otn-label-range-info {
+      refine otn-label-range {
+        presence
+          "Indicates the label range is an OTN label range.
+
+           This container MUST NOT be present if there are other
+           presence containers or attributes indicating another type
+           of label range.";
+      }
+    }
+  }
+
+  /*
+   * Data nodes
+   */
+
+  augment "/nw:networks/nw:network/nw:network-types/"
+        + "tet:te-topology" {
+    container otn-topology {
+      presence "indicates a topology type of Optical Transport
+                Network (OTN)-electrical layer.";
+      description "OTN topology type";
+    }
+    description "augment network types to include OTN.";
+  }
+
+  augment "/nw:networks/nw:network/nw:node/tet:te"
+        + "/tet:te-node-attributes" {
+    when "../../../nw:network-types/tet:te-topology/"
+       + "otnt:otn-topology" {
+      description "Augment only for OTN.";
+    }
+    description "Augment TE node attributes.";
+    container otn-node {
+      presence "The TE node is an OTN node.";
+      description
+        "Introduce new TE node type for OTN node.";
+    }
+  }
+
+  augment "/nw:networks/nw:network/nt:link/tet:te/"
+        + "tet:te-link-attributes" {
+    when "../../../nw:network-types/tet:te-topology/"
+       + "otnt:otn-topology" {
+      description "Augment only for OTN.";
+    }
+    description "Augment link configuration";
+
+    container otn-link {
+      description
+        "Attributes of the OTN Link.";
+      leaf odtu-flex-type {
+        type l1-types:odtu-flex-type;
+        description
+          "The type of Optical Data Tributary Unit (ODTU)
+           whose nominal bitrate is used to compute the number of
+           Tributary Slots (TS) required by the ODUflex LSPs set up
+           on this OTN Link.";
+      }
+      leaf tsg {
+        type identityref {
+          base l1-types:tributary-slot-granularity;
+        }
+        description "Tributary slot granularity.";
+        reference
+          "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical
+           Transport Network (OTN)";
+      }
+      leaf distance {
+        type uint32;
+        description "distance in the unit of kilometers";
+      }
+    }
+    container client-svc {
+      presence
+        "When present, indicates that the Link supports Constant
+         Bit Rate (CBR) client signals.";
+      description
+        "Attributes of the Link supporting CBR client signals.";
+      leaf-list supported-client-signal {
+        type identityref {
+          base l1-types:client-signal;
+        }
+        min-elements 1;
+        description
+          "List of client signal types supported by the Link.";
+      }
+    }
+  }
+
+  augment "/nw:networks/nw:network/nw:node/nt:termination-point/"
+        + "tet:te" {
+    when "../../../nw:network-types/tet:te-topology/"
+       + "otnt:otn-topology" {
+      description "Augment only for OTN.";
+    }
+    description
+      "Augment link termination point (LTP) configuration.";
+
+    container otn-link-tp {
+      description
+        "Attributes of the OTN Link Termination Point (LTP).";
+      leaf odtu-flex-type {
+        type l1-types:odtu-flex-type;
+        description
+          "The type of Optical Data Tributary Unit (ODTU)
+           whose nominal bitrate is used to compute the number of
+           Tributary Slots (TS) required by the ODUflex LSPs set up
+           on this OTN Link Termination Point (LTP).";
+      }
+    }
+    container client-svc {
+      presence
+        "When present, indicates that the Link Termination Point
+         (LTP) supports Constant Bit Rate (CBR) client signals.";
+      description
+        "OTN LTP Service attributes.";
+      leaf-list supported-client-signal {
+        type identityref {
+          base l1-types:client-signal;
+        }
+        description
+          "List of client signal types supported by the LTP.";
+      }
+    }
+  }
+
+  /*
+   * Augment TE bandwidth
+   */
+
+  augment "/nw:networks/nw:network/nw:node/nt:termination-point/"
+        + "tet:te/"
+        + "tet:interface-switching-capability/tet:max-lsp-bandwidth/"
+        + "tet:te-bandwidth/tet:technology" {
+    when "../../../../../../nw:network-types/tet:te-topology/"
+       + "otnt:otn-topology" {
+      description
+        "Augmentation parameters
apply only for networks with + OTN topology type."; + } + description + "Augment maximum LSP TE bandwidth for the link termination + point (LTP)."; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link Termination + Point (LTP) is used to compute the number of Tributary + Slots (TS) required by the ODUflex LSPs set up on this + OTN LTP."; + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay paths of these OTN + connectivity matrices."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay path of this OTN + connectivity matrix entry."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices information source."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay paths of these OTN + connectivity matrices."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only 
for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry information source"; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay path of this OTN + connectivity matrix entry."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:client-layer-adaptation/tet:switching-capability/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment client TE bandwidth of the tunnel termination point + (TTP)"; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + terminated on this OTN Tunnel Termination Point + (TTP)."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/tet:path-constraints/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay paths of these OTN Local + Link Connectivities."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/tet:path-constraints/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay path of this OTN Local + Link Connectivity entry."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + 
"tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link."; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum TE bandwidth for the TE link"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment unreserved TE bandwidth for the TE Link"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link + information source"; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum TE bandwidth for the TE link + information 
source"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link + information-source"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment unreserved TE bandwidth of the TE link + information source"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + description + "Augment maximum LSP TE bandwidth of the TE link + template"; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum TE bandwidth the TE link template"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum reservable TE bandwidth for the TE link + template."; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment unreserved TE bandwidth the TE link template"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + + /* + * Augment TE label range 
information + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + 
"Augment TE label range information for the TTP + Local Link Connectivities."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TTP + Local Link Connectivity entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE link."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE link + information source."; + uses label-range-info; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + description + "Augment TE label range information for the TE link template."; + uses label-range-info; + } + + /* + * Augment TE label + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE node + connectivity matrices"; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TE node + connectivity matrices"; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-step/" + + "tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE node + connectivity matrices"; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + 
"tet:underlay/tet:primary-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path of the + TE node connectivity matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path of the + TE node connectivity matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity + matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity + matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + 
"otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/" 
+ + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + 
"otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE node connectivity + matrices information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TE node connectivity + matrices information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE node connectivity + matrices information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE node connectivity matrices of the information + source entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE node connectivity matrices of the information + source entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity matrices + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + 
"tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity matrices + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when 
"../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment 
TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology"{ + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology"{ + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + 
"tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../" 
+ + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + 
Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE link."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE link."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE link."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range 
end for the TE link."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE link."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE link + information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TE link + information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE link + information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay primary path + of the TE link template."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay backup path + of the TE link template."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + description + "Augment TE label range start for the TE link template."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + description + "Augment TE label range end for the TE link template."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + 
"tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + description + "Augment TE label range step for the TE link template."; + case otn { + uses l1-types:otn-label-step; + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang new file mode 100644 index 000000000..834e78bcd --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang @@ -0,0 +1,835 @@ +module ietf-te-packet-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-packet-types"; + prefix te-packet-types; + + import ietf-yang-types { + prefix yang; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-te-types { + prefix te-types; + reference + "RFC XXXX: Common YANG Data Types for Traffic Engineering"; + } + + // RFC Editor: replace XXXX with actual RFC number + // and remove this note + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + "; + description + "This YANG module contains a collection of generally useful YANG + data type definitions specific to Packet Traffic Engineering + (TE). + + The model conforms to the Network Management Datastore + Architecture (NMDA). + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here. + + Copyright (c) 2025 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + revision 2025-01-24 { + description + "This revision adds the following new identities: + - bandwidth-profile-type; + - link-metric-delay-variation; + - link-metric-loss; + - path-metric-delay-variation; + - path-metric-loss. + + This revision adds the following new groupings: + - bandwidth-profile-parameters; + - te-packet-path-bandwidth; + - te-packet-link-bandwidth. 
+ + This revision provides also few editorial changes."; + reference + "RFC XXXX: Common YANG Data Types for Traffic Engineering"; + } + + // RFC Editor: replace XXXX with actual RFC number, update date + // information and remove this note + + revision 2020-06-10 { + description + "Latest revision of TE MPLS types."; + reference + "RFC 8776: Common YANG Data Types for Traffic Engineering"; + } + + /* + * Identities + */ + + identity bandwidth-profile-type { + description + "Bandwidth Profile Types"; + } + + identity mef-10 { + base bandwidth-profile-type; + description + "MEF 10 Bandwidth Profile"; + reference + "MEF 10.3: Ethernet Services Attributes Phase 3"; + } + + identity rfc-2697 { + base bandwidth-profile-type; + description + "RFC 2697 Bandwidth Profile"; + reference + "RFC 2697: A Single Rate Three Color Marker"; + } + + identity rfc-2698 { + base bandwidth-profile-type; + description + "RFC 2698 Bandwidth Profile"; + reference + "RFC 2698: A Two Rate Three Color Marker"; + } + + // Derived identities from te-types:link-metric-type + + identity link-metric-delay-variation { + base te-types:link-metric-type; + description + "The Unidirectional Delay Variation Metric, + measured in units of microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions, + Section 4.3 + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions, + Section 4.3"; + } + + identity link-metric-loss { + base te-types:link-metric-type; + description + "The Unidirectional Link Loss Metric, + measured in units of 0.000003%."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions, + Section 4.4 + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions, + Section 4.4"; + } + + // Derived identities from te-types:link-metric-type + + identity path-metric-delay-variation { + base te-types:path-metric-type; + description + "The Path Delay Variation Metric, + measured in units of microseconds."; + reference + "RFC 8233: Extensions to the Path Computation Element + Communication Protocol (PCEP) to Compute + Service-Aware Label Switched Paths (LSPs), + Section 3.1.2"; + } + + identity path-metric-loss { + base te-types:path-metric-type; + description + "The Path Loss Metric, measured in units of 0.000003%."; + reference + "RFC 8233: Extensions to the Path Computation Element + Communication Protocol (PCEP) to Compute + Service-Aware Label Switched Paths (LSPs), + Section 3.1.3"; + } + + identity backup-protection-type { + description + "Base identity for the backup protection type."; + } + + identity backup-protection-link { + base backup-protection-type; + description + "Backup provides link protection only."; + } + + identity backup-protection-node-link { + base backup-protection-type; + description + "Backup offers node (preferred) or link protection."; + } + + identity bc-model-type { + description + "Base identity for the Diffserv-TE Bandwidth Constraints + Model type."; + reference + "RFC 4124: Protocol Extensions for Support of Diffserv-aware + MPLS Traffic Engineering"; + } + + identity bc-model-rdm { + base bc-model-type; + description + "Russian Dolls Bandwidth Constraints Model type."; + reference + "RFC 4127: Russian Dolls Bandwidth Constraints Model for + Diffserv-aware MPLS Traffic Engineering"; + } + + identity bc-model-mam { + base bc-model-type; + description + "Maximum Allocation Bandwidth Constraints Model type."; + reference + "RFC 4125: Maximum Allocation Bandwidth Constraints Model for + Diffserv-aware MPLS Traffic Engineering"; + } + + identity 
bc-model-mar { + base bc-model-type; + description + "Maximum Allocation with Reservation Bandwidth Constraints + Model type."; + reference + "RFC 4126: Max Allocation with Reservation Bandwidth + Constraints Model for Diffserv-aware MPLS Traffic + Engineering & Performance Comparisons"; + } + + /* + * Typedefs + */ + + typedef te-bandwidth-requested-type { + type enumeration { + enum specified-value { + description + "Bandwidth value is explicitly specified."; + } + enum specified-profile { + description + "Bandwidth profile is explicitly specified."; + } + enum auto { + description + "Bandwidth is automatically computed."; + } + } + description + "Enumerated type for specifying whether bandwidth is + explicitly specified or automatically computed."; + } + + typedef te-class-type { + type uint8; + description + "Diffserv-TE Class-Type. + Defines a set of Traffic Trunks crossing a link that is + governed by a specific set of bandwidth constraints. + + Class-Type is used for the purposes of link bandwidth + allocation, constraint-based routing, and admission control."; + reference + "RFC 4124: Protocol Extensions for Support of Diffserv-aware + MPLS Traffic Engineering"; + } + + typedef bc-type { + type uint8 { + range "0..7"; + } + description + "Diffserv-TE bandwidth constraints as defined in RFC 4124."; + reference + "RFC 4124: Protocol Extensions for Support of Diffserv-aware + MPLS Traffic Engineering"; + } + + typedef bandwidth-kbps { + type uint64; + units "kilobits per second"; + description + "Bandwidth values, expressed in kilobits per second."; + } + + typedef bandwidth-mbps { + type uint64; + units "megabits per second"; + description + "Bandwidth values, expressed in megabits per second."; + } + + typedef bandwidth-gbps { + type uint64; + units "gigabits per second"; + description + "Bandwidth values, expressed in gigabits per second."; + } + + /* + * Groupings + */ + + grouping performance-metrics-attributes-packet { + description + "Contains Performance Metrics (PM) information."; + uses te-types:performance-metrics-attributes { + augment "performance-metrics-one-way" { + description + "Performance Metrics (PM) one-way packet-specific + augmentation for a generic PM grouping."; + leaf one-way-min-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "One-way minimum delay or latency."; + } + leaf one-way-min-delay-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "One-way minimum delay or latency normality."; + } + leaf one-way-max-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "One-way maximum delay or latency."; + } + leaf one-way-max-delay-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "One-way maximum delay or latency normality."; + } + leaf one-way-delay-variation { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "One-way delay variation."; + reference + "RFC 5481: Packet Delay Variation Applicability + Statement, Section 4.2"; + } + leaf one-way-delay-variation-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "One-way delay variation normality."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions + RFC 7823: Performance-Based Path Selection for + Explicitly Routed Label Switched Paths (LSPs) + Using TE Metric Extensions"; + } + leaf 
one-way-packet-loss { + type decimal64 { + fraction-digits 6; + range "0..50.331642"; + } + units "%"; + description + "One-way packet loss as a percentage of the total traffic + sent over a configurable interval. + + The finest precision is 0.000003%."; + reference + "RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.4"; + } + leaf one-way-packet-loss-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Packet loss normality."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions + RFC 7823: Performance-Based Path Selection for + Explicitly Routed Label Switched Paths (LSPs) + Using TE Metric Extensions"; + } + } + augment "performance-metrics-two-way" { + description + "Performance Metrics (PM) two-way packet-specific + augmentation for a generic PM grouping."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE + Metric Extensions"; + leaf two-way-min-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way minimum delay or latency."; + } + leaf two-way-min-delay-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Two-way minimum delay or latency normality."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions + RFC 7823: Performance-Based Path Selection for + Explicitly Routed Label Switched Paths (LSPs) + Using TE Metric Extensions"; + } + leaf two-way-max-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way maximum delay or latency."; + } + leaf two-way-max-delay-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Two-way maximum delay or latency normality."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions + RFC 7823: Performance-Based Path Selection for + Explicitly Routed Label Switched Paths (LSPs) + Using TE Metric Extensions"; + } + leaf two-way-delay-variation { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way delay variation."; + reference + "RFC 5481: Packet Delay Variation Applicability + Statement, Section 4.2"; + } + leaf two-way-delay-variation-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Two-way delay variation normality."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions + RFC 7823: Performance-Based Path Selection for + Explicitly Routed Label Switched Paths (LSPs) + Using TE Metric Extensions"; + } + leaf two-way-packet-loss { + type decimal64 { + fraction-digits 6; + range "0..50.331642"; + } + units "%"; + default "0"; + description + "Two-way packet loss as a percentage of the total traffic + sent over a configurable interval. 
+ + The finest precision is 0.000003%."; + } + leaf two-way-packet-loss-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Two-way packet loss normality."; + } + } + } + } + + grouping one-way-performance-metrics-packet { + description + "One-way packet Performance Metrics (PM) throttle grouping."; + leaf one-way-min-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way minimum delay or latency."; + } + leaf one-way-max-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way maximum delay or latency."; + } + leaf one-way-delay-variation { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way delay variation."; + } + leaf one-way-packet-loss { + type decimal64 { + fraction-digits 6; + range "0..50.331642"; + } + units "%"; + default "0"; + description + "One-way packet loss as a percentage of the total traffic + sent over a configurable interval. + + The finest precision is 0.000003%."; + } + } + + grouping one-way-performance-metrics-gauge-packet { + description + "One-way packet Performance Metrics (PM) throttle grouping. + + This grouping is used to report the same metrics defined in + the one-way-performance-metrics-packet grouping, using gauges + instead of uint32 data types and referencing IPPM RFCs + instead of IGP-TE RFCs."; + leaf one-way-min-delay { + type yang:gauge64; + units "microseconds"; + description + "One-way minimum delay or latency."; + } + leaf one-way-max-delay { + type yang:gauge64; + units "microseconds"; + description + "One-way maximum delay or latency."; + reference + "RFC 7679: A One-Way Delay Metric for IP Performance + Metrics (IPPM)"; + } + leaf one-way-delay-variation { + type yang:gauge64; + units "microseconds"; + description + "One-way delay variation."; + reference + "RFC 3393: IP Packet Delay Variation Metric for IP + Performance Metrics (IPPM)"; + } + leaf one-way-packet-loss { + type decimal64 { + fraction-digits 5; + range "0..100"; + } + description + "The ratio of packets dropped to packets transmitted between + two endpoints."; + reference + "RFC 7680: A One-Way Loss Metric for IP Performance + Metrics (IPPM)"; + } + } + + grouping two-way-performance-metrics-packet { + description + "Two-way packet Performance Metrics (PM) throttle grouping."; + leaf two-way-min-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way minimum delay or latency."; + } + leaf two-way-max-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way maximum delay or latency."; + } + leaf two-way-delay-variation { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way delay variation."; + } + leaf two-way-packet-loss { + type decimal64 { + fraction-digits 6; + range "0..50.331642"; + } + units "%"; + default "0"; + description + "Two-way packet loss as a percentage of the total traffic + sent over a configurable interval. + + The finest precision is 0.000003%."; + } + } + + grouping two-way-performance-metrics-gauge-packet { + description + "Two-way packet Performance Metrics (PM) throttle grouping. 
+ + This grouping is used to report the same metrics defined in + the two-way-performance-metrics-packet grouping, using gauges + instead of uint32 data types and referencing IPPM RFCs + instead of IGP-TE RFCs."; + leaf two-way-min-delay { + type yang:gauge64; + units "microseconds"; + description + "Two-way minimum delay or latency."; + reference + "RFC 2681: A Round-trip Delay Metric for IPPM"; + } + leaf two-way-max-delay { + type yang:gauge64; + units "microseconds"; + description + "Two-way maximum delay or latency."; + reference + "RFC 2681: A Round-trip Delay Metric for IPPM"; + } + leaf two-way-delay-variation { + type yang:gauge64; + units "microseconds"; + description + "Two-way delay variation."; + reference + "RFC 5481: Packet Delay Variation Applicability Statement"; + } + leaf two-way-packet-loss { + type decimal64 { + fraction-digits 5; + range "0..100"; + } + description + "The ratio of packets dropped to packets transmitted between + two endpoints."; + } + } + + grouping performance-metrics-throttle-container-packet { + description + "Packet Performance Metrics (PM) threshold grouping."; + uses te-types:performance-metrics-throttle-container { + augment "throttle/threshold-out" { + description + "Performance Metrics (PM) threshold-out packet + augmentation for a generic grouping."; + uses one-way-performance-metrics-packet; + uses two-way-performance-metrics-packet; + } + augment "throttle/threshold-in" { + description + "Performance Metrics (PM) threshold-in packet augmentation + for a generic grouping."; + uses one-way-performance-metrics-packet; + uses two-way-performance-metrics-packet; + } + augment "throttle/threshold-accelerated-advertisement" { + description + "Performance Metrics (PM) accelerated advertisement packet + augmentation for a generic grouping."; + uses one-way-performance-metrics-packet; + uses two-way-performance-metrics-packet; + } + } + } + + grouping bandwidth-profile-parameters { + description + "Common parameters to define bandwidth profiles in packet + networks."; + leaf cir { + type uint64; + units "bits per second"; + description + "Committed Information Rate (CIR)."; + } + leaf cbs { + type uint64; + units "bytes"; + description + "Committed Burst Size (CBS)."; + } + leaf eir { + type uint64; + units "bits per second"; + description + "Excess Information Rate (EIR)."; + } + leaf ebs { + type uint64; + units "bytes"; + description + "Excess Burst Size (EBS)."; + } + leaf pir { + type uint64; + units "bits per second"; + description + "Peak Information Rate (PIR)."; + } + leaf pbs { + type uint64; + units "bytes"; + description + "Peak Burst Size (PBS)."; + } + } + + grouping te-packet-path-bandwidth { + description + "Bandwidth attributes for TE Packet paths."; + container packet-bandwidth { + description + "Bandwidth attributes for TE Packet paths."; + leaf specification-type { + type te-bandwidth-requested-type; + description + "The bandwidth specification type, either explicitly + specified or automatically computed."; + } + leaf set-bandwidth { + when "../specification-type = 'specified-value'" { + description + "When the bandwidth value is explicitly specified."; + } + type bandwidth-kbps; + description + "Set the bandwidth value explicitly, e.g., using offline + calculation."; + } + container bandwidth-profile { + when "../specification-type = 'specified-profile'" { + description + "When the bandwidth profile is explicitly specified."; + } + description + "Set the bandwidth profile attributes explicitly."; + leaf bandwidth-profile-name { 
+ type string; + description + "Name of Bandwidth Profile."; + } + leaf bandwidth-profile-type { + type identityref { + base bandwidth-profile-type; + } + description + "Type of Bandwidth Profile."; + } + uses bandwidth-profile-parameters; + } + leaf class-type { + type te-types:te-ds-class; + description + "The Class-Type of traffic transported by the LSP."; + reference + "RFC 4124: Protocol Extensions for Support of + Diffserv-aware MPLS Traffic Engineering, + Section 4.3.1"; + } + leaf signaled-bandwidth { + type te-packet-types:bandwidth-kbps; + config false; + description + "The currently signaled bandwidth of the LSP. + + In the case where the bandwidth is specified + explicitly, then this will match the value of the + set-bandwidth leaf. + + In the cases where the bandwidth is dynamically + computed by the system, the current value of the + bandwidth should be reflected."; + } + } + } + + grouping te-packet-link-bandwidth { + description + "Bandwidth attributes for Packet TE links."; + leaf packet-bandwidth { + type uint64; + units "bits per second"; + description + "Bandwidth value for Packet TE links."; + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang new file mode 100644 index 000000000..aef9434ed --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang @@ -0,0 +1,4473 @@ +module ietf-te-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-types"; + prefix te-types; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-yang-types { + prefix yang; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-routing-types { + prefix rt-types; + reference + "RFC 8294: Common YANG Data Types for the Routing Area"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + import ietf-network-topology { + prefix nt; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + "; + description + "This YANG module contains a collection of generally useful + YANG data type definitions specific to TE. + + The model conforms to the Network Management Datastore + Architecture (NMDA). + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here. + + Copyright (c) 2025 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + revision 2025-01-24 { + description + "This revision adds the following new identities: + - lsp-provisioning-error-reason; + - association-type-diversity; + - tunnel-admin-state-auto; + - lsp-restoration-restore-none; + - restoration-scheme-rerouting; + - path-metric-optimization-type; + - link-path-metric-type; + - link-metric-type and its derived identities; + - path-computation-error-reason and its derived identities; + - protocol-origin-type and its derived identities; + - svec-objective-function-type and its derived identities; + - svec-metric-type and its derived identities. + + This revision adds the following new data types: + - path-type. + + This revision adds the following new groupings: + - explicit-route-hop-with-srlg; + - encoding-and-switching-type; + - te-generic-node-id. + + This revision updates the following identities: + - objective-function-type; + - action-exercise; + - path-metric-type; + - path-metric-te; + - path-metric-igp; + - path-metric-hop; + - path-metric-delay-average; + - path-metric-delay-minimum; + - path-metric-residual-bandwidth; + - path-metric-optimize-includes; + - path-metric-optimize-excludes; + - te-optimization-criterion. + + This revision updates the following data types: + - te-node-id. + + This revision updates the following groupings: + - explicit-route-hop: + - adds the following leaves: + - node-id-uri; + - link-tp-id-uri; + - updates the following leaves: + - node-id; + - link-tp-id; + - record-route-state: + - adds the following leaves: + - node-id-uri; + - link-tp-id-uri; + - updates the following leaves: + - node-id; + - link-tp-id; + - optimization-metric-entry: + - updates the following leaves: + - metric-type; + - tunnel-constraints; + - adds the following leaves: + - network-id; + - path-constraints-route-objects: + - updates the following containers: + - explicit-route-objects-always; + - generic-path-metric-bounds: + - updates the following leaves: + - metric-type; + - generic-path-optimization + - adds the following leaves: + - tiebreaker; + - deprecate the following containers: + - tiebreakers. + + This revision obsoletes the following identities: + - of-minimize-agg-bandwidth-consumption; + - of-minimize-load-most-loaded-link; + - of-minimize-cost-path-set; + - lsp-protection-reroute-extra; + - lsp-protection-reroute. 
+ + This revision provides also few editorial changes."; + reference + "RFC XXXX: Common YANG Data Types for Traffic Engineering"; + } + + // RFC Editor: replace XXXX with actual RFC number, update date + // information and remove this note + + revision 2020-06-10 { + description + "Initial Version of TE types."; + reference + "RFC 8776: Common YANG Data Types for Traffic Engineering"; + } + + /* + * Features + */ + + feature p2mp-te { + description + "Indicates support for Point-to-Multipoint TE (P2MP-TE)."; + reference + "RFC 4875: Extensions to Resource Reservation Protocol - + Traffic Engineering (RSVP-TE) for + Point-to-Multipoint TE Label Switched Paths (LSPs)"; + } + + feature frr-te { + description + "Indicates support for TE Fast Reroute (FRR)."; + reference + "RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP Tunnels"; + } + + feature extended-admin-groups { + description + "Indicates support for TE link extended administrative + groups."; + reference + "RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + + feature named-path-affinities { + description + "Indicates support for named path affinities."; + } + + feature named-extended-admin-groups { + description + "Indicates support for named extended administrative groups."; + } + + feature named-srlg-groups { + description + "Indicates support for named Shared Risk Link Group (SRLG)."; + } + + feature named-path-constraints { + description + "Indicates support for named path constraints."; + } + + feature path-optimization-metric { + description + "Indicates support for path optimization metrics."; + } + + feature path-optimization-objective-function { + description + "Indicates support for path optimization objective functions."; + } + + /* + * Identities + */ + + identity lsp-provisioning-error-reason { + description + "Base identity for LSP provisioning errors."; + } + + identity session-attributes-flags { + description + "Base identity for the RSVP-TE session attributes flags."; + } + + identity local-protection-desired { + base session-attributes-flags; + description + "Local protection is desired."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.7.1"; + } + + identity se-style-desired { + base session-attributes-flags; + description + "Shared explicit style, to allow the LSP to be established + and share resources with the old LSP."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + + identity local-recording-desired { + base session-attributes-flags; + description + "Label recording is desired."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.7.1"; + } + + identity bandwidth-protection-desired { + base session-attributes-flags; + description + "Requests FRR bandwidth protection on LSRs, if present."; + reference + "RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels"; + } + + identity node-protection-desired { + base session-attributes-flags; + description + "Requests FRR node protection on LSRs, if present."; + reference + "RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels"; + } + + identity path-reevaluation-request { + base session-attributes-flags; + description + "This flag indicates that a path re-evaluation (of the + current path in use) is requested. 
+ + Note that this does not trigger any LSP reroutes but + instead just signals a request to evaluate whether a + preferable path exists."; + reference + "RFC 4736: Reoptimization of Multiprotocol Label Switching + (MPLS) Traffic Engineering (TE) Loosely Routed + Label Switched Path (LSP)"; + } + + identity soft-preemption-desired { + base session-attributes-flags; + description + "Soft preemption of LSP resources is desired."; + reference + "RFC 5712: MPLS Traffic Engineering Soft Preemption"; + } + + identity lsp-attributes-flags { + description + "Base identity for LSP attributes flags."; + } + + identity end-to-end-rerouting-desired { + base lsp-attributes-flags; + description + "Indicates end-to-end rerouting behavior for an LSP + undergoing establishment. + + This MAY also be used to specify the behavior of end-to-end + LSP recovery for established LSPs."; + reference + "RFC 4920: Crankback Signaling Extensions for MPLS and GMPLS + RSVP-TE + RFC 5420: Encoding of Attributes for MPLS LSP Establishment + Using Resource Reservation Protocol Traffic + Engineering (RSVP-TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity boundary-rerouting-desired { + base lsp-attributes-flags; + description + "Indicates boundary rerouting behavior for an LSP undergoing + establishment. + + This MAY also be used to specify segment-based LSP recovery + through nested crankback for established LSPs. + + The boundary Area Border Router (ABR) / Autonomous System + Border Router (ASBR) can decide to forward the PathErr + message upstream to either an upstream boundary ABR/ASBR or + the ingress LSR. + + Alternatively, it can try to select another egress boundary + LSR."; + reference + "RFC 4920: Crankback Signaling Extensions for MPLS and GMPLS + RSVP-TE + RFC 5420: Encoding of Attributes for MPLS LSP Establishment + Using Resource Reservation Protocol Traffic + Engineering (RSVP-TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity segment-based-rerouting-desired { + base lsp-attributes-flags; + description + "Indicates segment-based rerouting behavior for an LSP + undergoing establishment. 
+ + This MAY also be used to specify segment-based LSP recovery + for established LSPs."; + reference + "RFC 4920: Crankback Signaling Extensions for MPLS and GMPLS + RSVP-TE + RFC 5420: Encoding of Attributes for MPLS LSP Establishment + Using Resource Reservation Protocol + Traffic Engineering (RSVP-TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity lsp-integrity-required { + base lsp-attributes-flags; + description + "Indicates that LSP integrity is required."; + reference + "RFC 4875: Extensions to Resource Reservation Protocol - + Traffic Engineering (RSVP-TE) for + Point-to-Multipoint TE Label Switched Paths (LSPs) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity contiguous-lsp-desired { + base lsp-attributes-flags; + description + "Indicates that a contiguous LSP is desired."; + reference + "RFC 5151: Inter-Domain MPLS and GMPLS Traffic Engineering -- + Resource Reservation Protocol-Traffic Engineering + (RSVP-TE) Extensions + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity lsp-stitching-desired { + base lsp-attributes-flags; + description + "Indicates that LSP stitching is desired."; + reference + "RFC 5150: Label Switched Path Stitching with Generalized + Multiprotocol Label Switching Traffic Engineering + (GMPLS TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity pre-planned-lsp-flag { + base lsp-attributes-flags; + description + "Indicates that the LSP MUST be provisioned in the + control plane only."; + reference + "RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions for + Multi-Layer and Multi-Region Networks (MLN/MRN) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity non-php-behavior-flag { + base lsp-attributes-flags; + description + "Indicates that non-PHP (non-Penultimate Hop Popping) + behavior for the LSP is desired."; + reference + "RFC 6511: Non-Penultimate Hop Popping Behavior and + Out-of-Band Mapping for RSVP-TE Label Switched + Paths + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity oob-mapping-flag { + base lsp-attributes-flags; + description + "Indicates that signaling of the egress binding information + is out of band (e.g., via the Border Gateway Protocol + (BGP))."; + reference + "RFC 6511: Non-Penultimate Hop Popping Behavior and + Out-of-Band Mapping for RSVP-TE Label Switched + Paths + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity entropy-label-capability { + base lsp-attributes-flags; + description + "Indicates entropy label capability."; + reference + "RFC 6790: The Use of Entropy Labels in MPLS Forwarding + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity oam-mep-entity-desired { + base lsp-attributes-flags; + description + "OAM Maintenance Entity Group End Point (MEP) entities + desired."; + reference + "RFC 7260: GMPLS RSVP-TE Extensions for Operations, + Administration, and Maintenance (OAM) + Configuration"; + } + + identity oam-mip-entity-desired { + base lsp-attributes-flags; + description + "OAM Maintenance Entity Group Intermediate Points (MIP) + entities desired."; + reference + "RFC 7260: GMPLS RSVP-TE Extensions for Operations, + Administration, and Maintenance (OAM) + Configuration"; + } + + identity 
srlg-collection-desired { + base lsp-attributes-flags; + description + "Shared Risk Link Group (SRLG) collection desired."; + reference + "RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO) + RFC 8001: RSVP-TE Extensions for Collecting Shared Risk + Link Group (SRLG) Information"; + } + + identity loopback-desired { + base lsp-attributes-flags; + description + "This flag indicates that a particular node on the LSP is + required to enter loopback mode. + + This can also be used to specify the loopback state of the + node."; + reference + "RFC 7571: GMPLS RSVP-TE Extensions for Lock Instruct and + Loopback"; + } + + identity p2mp-te-tree-eval-request { + base lsp-attributes-flags; + description + "P2MP-TE tree re-evaluation request."; + reference + "RFC 8149: RSVP Extensions for Reoptimization of Loosely + Routed Point-to-Multipoint Traffic Engineering + Label Switched Paths (LSPs)"; + } + + identity rtm-set-desired { + base lsp-attributes-flags; + description + "Residence Time Measurement (RTM) attribute flag requested."; + reference + "RFC 8169: Residence Time Measurement in MPLS Networks"; + } + + identity link-protection-type { + description + "Base identity for the link protection type."; + } + + identity link-protection-unprotected { + base link-protection-type; + description + "Unprotected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-extra-traffic { + base link-protection-type; + description + "Extra-Traffic protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-shared { + base link-protection-type; + description + "Shared protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-1-for-1 { + base link-protection-type; + description + "One-for-one (1:1) protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-1-plus-1 { + base link-protection-type; + description + "One-plus-one (1+1) protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-enhanced { + base link-protection-type; + description + "A compound link protection type derived from the underlay + TE tunnel protection configuration supporting the TE link."; + } + + identity association-type { + description + "Base identity for the tunnel association."; + } + + identity association-type-recovery { + base association-type; + description + "Association type for recovery, used to associate LSPs of the + same tunnel for recovery."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 6780: RSVP ASSOCIATION Object Extensions"; + } + + identity association-type-resource-sharing { + base association-type; + description + "Association type for resource sharing, used to enable + resource sharing during make-before-break."; + reference + "RFC 4873: GMPLS Segment Recovery + RFC 6780: RSVP ASSOCIATION Object Extensions"; + } + + identity 
association-type-double-sided-bidir { + base association-type; + description + "Association type for double-sided bidirectional LSPs, + used to associate two LSPs of two tunnels that are + independently configured on either endpoint."; + reference + "RFC 7551: RSVP-TE Extensions for Associated Bidirectional + Label Switched Paths (LSPs)"; + } + + identity association-type-single-sided-bidir { + base association-type; + description + "Association type for single-sided bidirectional LSPs, + used to associate two LSPs of two tunnels, where one + tunnel is configured on one side/endpoint and the other + tunnel is dynamically created on the other endpoint."; + reference + "RFC 6780: RSVP ASSOCIATION Object Extensions + RFC 7551: RSVP-TE Extensions for Associated Bidirectional + Label Switched Paths (LSPs)"; + } + + identity association-type-diversity { + base association-type; + description + "Association Type diversity used to associate LSPs whose + paths are to be diverse from each other."; + reference + "RFC 8800: Path Computation Element Communication Protocol + (PCEP) Extension for Label Switched Path (LSP) + Diversity Constraint Signaling"; + } + + identity objective-function-type { + description + "Base identity for path objective function types."; + } + + identity of-minimize-cost-path { + base objective-function-type; + description + "Objective function for minimizing path cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-load-path { + base objective-function-type; + description + "Objective function for minimizing the load on one or more + paths."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-maximize-residual-bandwidth { + base objective-function-type; + description + "Objective function for maximizing residual bandwidth."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-agg-bandwidth-consumption { + base objective-function-type; + status obsolete; + description + "Objective function for minimizing aggregate bandwidth + consumption. + + This identity has been obsoleted: the + 'svec-of-minimize-agg-bandwidth-consumption' identity SHOULD + be used instead."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-load-most-loaded-link { + base objective-function-type; + status obsolete; + description + "Objective function for minimizing the load on the link that + is carrying the highest load. + + This identity has been obsoleted: the + 'svec-of-minimize-load-most-loaded-link' identity SHOULD + be used instead."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-cost-path-set { + base objective-function-type; + status obsolete; + description + "Objective function for minimizing the cost on a path set. 
+ + This identity has been obsoleted: the + 'svec-of-minimize-cost-path-set' identity SHOULD + be used instead."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity path-computation-method { + description + "Base identity for supported path computation mechanisms."; + } + + identity path-locally-computed { + base path-computation-method; + description + "Indicates a constrained-path LSP in which the + path is computed by the local LER."; + reference + "RFC 9522: Overview and Principles of Internet Traffic + Engineering, Section 4.4"; + } + + identity path-externally-queried { + base path-computation-method; + description + "Constrained-path LSP in which the path is obtained by + querying an external source, such as a PCE server. + In the case that an LSP is defined to be externally queried, + it may also have associated explicit definitions (provided + to the external source to aid computation). + + The path that is returned by the external source may + require further local computation on the device."; + reference + "RFC 9522: Overview and Principles of Internet Traffic + Engineering + RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity path-explicitly-defined { + base path-computation-method; + description + "Constrained-path LSP in which the path is + explicitly specified as a collection of strict and/or loose + hops."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 9522: Overview and Principles of Internet Traffic + Engineering"; + } + + identity lsp-metric-type { + description + "Base identity for the LSP metric specification types."; + } + + identity lsp-metric-relative { + base lsp-metric-type; + description + "The metric specified for the LSPs to which this identity + refers is specified as a value relative to the IGP metric + cost to the LSP's tail end."; + reference + "RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity lsp-metric-absolute { + base lsp-metric-type; + description + "The metric specified for the LSPs to which this identity + refers is specified as an absolute value."; + reference + "RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity lsp-metric-inherited { + base lsp-metric-type; + description + "The metric for the LSPs to which this identity refers is + not specified explicitly; rather, it is directly inherited + from the IGP cost."; + reference + "RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity te-tunnel-type { + description + "Base identity from which specific tunnel types are derived."; + } + + identity te-tunnel-p2p { + base te-tunnel-type; + description + "TE Point-to-Point (P2P) tunnel type."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + + identity te-tunnel-p2mp { + base te-tunnel-type; + description + "TE P2MP tunnel type."; + reference + "RFC 4875: Extensions to Resource Reservation Protocol - + Traffic Engineering (RSVP-TE) for + Point-to-Multipoint TE Label Switched Paths + (LSPs)"; + } + + identity tunnel-action-type { + description + "Base identity from which specific tunnel action types + are derived."; + } + + identity tunnel-action-resetup { + base tunnel-action-type; + description + "TE tunnel action that tears down the tunnel's current LSP + (if any) and attempts to re-establish a 
new LSP."; + } + + identity tunnel-action-reoptimize { + base tunnel-action-type; + description + "TE tunnel action that reoptimizes the placement of the + tunnel LSP(s)."; + } + + identity tunnel-action-switchpath { + base tunnel-action-type; + description + "TE tunnel action that switches the tunnel's LSP to use the + specified path."; + } + + identity te-action-result { + description + "Base identity from which specific TE action results + are derived."; + } + + identity te-action-success { + base te-action-result; + description + "TE action was successful."; + } + + identity te-action-fail { + base te-action-result; + description + "TE action failed."; + } + + identity tunnel-action-inprogress { + base te-action-result; + description + "TE action is in progress."; + } + + identity tunnel-admin-state-type { + description + "Base identity for TE tunnel administrative states."; + } + + identity tunnel-admin-state-up { + base tunnel-admin-state-type; + description + "Tunnel's administrative state is up."; + } + + identity tunnel-admin-state-down { + base tunnel-admin-state-type; + description + "Tunnel's administrative state is down."; + } + + identity tunnel-admin-state-auto { + base tunnel-admin-state-type; + description + "Tunnel administrative auto state. The administrative status + in state datastore transitions to 'tunnel-admin-up' when the + tunnel used by the client layer, and to 'tunnel-admin-down' + when it is not used by the client layer."; + } + + identity tunnel-state-type { + description + "Base identity for TE tunnel states."; + } + + identity tunnel-state-up { + base tunnel-state-type; + description + "Tunnel's state is up."; + } + + identity tunnel-state-down { + base tunnel-state-type; + description + "Tunnel's state is down."; + } + + identity lsp-state-type { + description + "Base identity for TE LSP states."; + } + + identity lsp-path-computing { + base lsp-state-type; + description + "State path computation is in progress."; + } + + identity lsp-path-computation-ok { + base lsp-state-type; + description + "State path computation was successful."; + } + + identity lsp-path-computation-failed { + base lsp-state-type; + description + "State path computation failed."; + } + + identity lsp-state-setting-up { + base lsp-state-type; + description + "State is being set up."; + } + + identity lsp-state-setup-ok { + base lsp-state-type; + description + "State setup was successful."; + } + + identity lsp-state-setup-failed { + base lsp-state-type; + description + "State setup failed."; + } + + identity lsp-state-up { + base lsp-state-type; + description + "State is up."; + } + + identity lsp-state-tearing-down { + base lsp-state-type; + description + "State is being torn down."; + } + + identity lsp-state-down { + base lsp-state-type; + description + "State is down."; + } + + identity path-invalidation-action-type { + description + "Base identity for TE path invalidation action types."; + } + + identity path-invalidation-action-drop { + base path-invalidation-action-type; + description + "Upon invalidation of the TE tunnel path, the tunnel remains + valid, but any packet mapped over the tunnel is dropped."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + } + + identity path-invalidation-action-teardown { + base path-invalidation-action-type; + description + "TE path invalidation action teardown."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + } + + identity lsp-restoration-type { + description + 
"Base identity from which LSP restoration types are derived."; + } + + identity lsp-restoration-restore-none { + base lsp-restoration-type; + description + "No LSP affected by a failure is restored."; + } + + identity lsp-restoration-restore-any { + base lsp-restoration-type; + description + "Any LSP affected by a failure is restored."; + } + + identity lsp-restoration-restore-all { + base lsp-restoration-type; + description + "Affected LSPs are restored after all LSPs of the tunnel are + broken."; + } + + identity restoration-scheme-type { + description + "Base identity for LSP restoration schemes."; + } + + identity restoration-scheme-rerouting { + base restoration-scheme-type; + description + "Restoration LSP is computed, signalled and configured after + the failure detection. + + This restoration scheme is also known as + 'Full LSP Re-routing', with the alternate route being + computed after the failure occurs."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity restoration-scheme-preconfigured { + base restoration-scheme-type; + description + "Restoration LSP is precomputed, presignalled and + preconfigured prior to the failure."; + } + + identity restoration-scheme-precomputed { + base restoration-scheme-type; + description + "Restoration LSP is precomputed, but not presignalled nor + preconfigured, prior to the failure. + + This restoration scheme is also known as + 'Full LSP Re-routing', with the alternate route being + pre-computed and stored for use when the failure occurs."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity restoration-scheme-presignaled { + base restoration-scheme-type; + description + "Restoration LSP is presignaled, but not preconfigured, + prior to the failure. + + This restoration scheme is also known as + 'Pre-planned LSP Re-routing'."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-type { + description + "Base identity from which LSP protection types are derived."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-unprotected { + base lsp-protection-type; + description + "'Unprotected' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-reroute-extra { + base lsp-protection-type; + status obsolete; + description + "'(Full) Rerouting' LSP protection type. + + This identity has been obsoleted: the + 'restoration-scheme-rerouting' identity SHOULD be used + instead."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-reroute { + base lsp-protection-type; + status obsolete; + description + "'Rerouting without Extra-Traffic' LSP protection type. 
+ + This identity has been obsoleted: the + 'restoration-scheme-rerouting' identity SHOULD be used + instead."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-1-for-n { + base lsp-protection-type; + description + "'1:N Protection with Extra-Traffic' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-1-for-1 { + base lsp-protection-type; + description + "LSP protection '1:1 Protection Type'."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-unidir-1-plus-1 { + base lsp-protection-type; + description + "'1+1 Unidirectional Protection' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-bidir-1-plus-1 { + base lsp-protection-type; + description + "'1+1 Bidirectional Protection' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-extra-traffic { + base lsp-protection-type; + description + "Extra-Traffic LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-state { + description + "Base identity of protection states for reporting purposes."; + } + + identity normal { + base lsp-protection-state; + description + "Normal state."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity signal-fail-of-protection { + base lsp-protection-state; + description + "The protection transport entity has a signal fail condition + that is of higher priority than the forced switchover + command."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity lockout-of-protection { + base lsp-protection-state; + description + "A Loss of Protection (LoP) command is active."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity forced-switch { + base lsp-protection-state; + description + "A forced switchover command is active."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity signal-fail { + base lsp-protection-state; + description + "There is a signal fail condition on either the working path + or the protection path."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity signal-degrade { + base lsp-protection-state; + description + "There is a 
signal degrade condition on either the working + path or the protection path."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity manual-switch { + base lsp-protection-state; + description + "A manual switchover command is active."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity wait-to-restore { + base lsp-protection-state; + description + "A Wait-to-Restore (WTR) timer is running."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity do-not-revert { + base lsp-protection-state; + description + "A Do Not Revert (DNR) condition is active because of + non-revertive behavior."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity failure-of-protocol { + base lsp-protection-state; + description + "LSP protection is not working because of a protocol failure + condition."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity protection-external-commands { + description + "Base identity from which protection-related external commands + used for troubleshooting purposes are derived."; + } + + identity action-freeze { + base protection-external-commands; + description + "A temporary configuration action initiated by an operator + command that prevents any switchover action from being taken + and, as such, freezes the current state."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity clear-freeze { + base protection-external-commands; + description + "An action that clears the active freeze state."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-lockout-of-normal { + base protection-external-commands; + description + "A temporary configuration action initiated by an operator + command to ensure that the normal traffic is not allowed + to use the protection transport entity."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + 
(GMPLS)"; + } + + identity clear-lockout-of-normal { + base protection-external-commands; + description + "An action that clears the active lockout of the + normal state."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-lockout-of-protection { + base protection-external-commands; + description + "A temporary configuration action initiated by an operator + command to ensure that the protection transport entity is + temporarily not available to transport a traffic signal + (either normal or Extra-Traffic)."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-forced-switch { + base protection-external-commands; + description + "A switchover action initiated by an operator command to + switch the Extra-Traffic signal, the normal traffic signal, + or the null signal to the protection transport entity, + unless a switchover command of equal or higher priority is + in effect."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-manual-switch { + base protection-external-commands; + description + "A switchover action initiated by an operator command to + switch the Extra-Traffic signal, the normal traffic signal, + or the null signal to the protection transport entity, + unless a fault condition exists on other transport entities + or a switchover command of equal or higher priority is in + effect."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-exercise { + base protection-external-commands; + description + "An action that starts testing whether or not Automatic + Protection Switching (APS) communication is operating + correctly. 
+ + It is of lower priority than any other state or command."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity clear { + base protection-external-commands; + description + "An action that clears the active near-end lockout of a + protection, forced switchover, manual switchover, + Wait-to-Restore (WTR) state, or exercise command."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity switching-capabilities { + description + "Base identity for interface switching capabilities."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-psc1 { + base switching-capabilities; + description + "Packet-Switch Capable-1 (PSC-1)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-evpl { + base switching-capabilities; + description + "Ethernet Virtual Private Line (EVPL)."; + reference + "RFC 6004: Generalized MPLS (GMPLS) Support for Metro + Ethernet Forum and G.8011 Ethernet Service + Switching"; + } + + identity switching-l2sc { + base switching-capabilities; + description + "Layer-2 Switch Capable (L2SC)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-tdm { + base switching-capabilities; + description + "Time-Division-Multiplex Capable (TDM)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-otn { + base switching-capabilities; + description + "OTN-TDM capable."; + reference + "RFC 7138: Traffic Engineering Extensions to OSPF for GMPLS + Control of Evolving G.709 Optical Transport + Networks"; + } + + identity switching-dcsc { + base switching-capabilities; + description + "Data Channel Switching Capable (DCSC)."; + reference + "RFC 6002: Generalized MPLS (GMPLS) Data Channel + Switching Capable (DCSC) and Channel Set Label + Extensions"; + } + + identity switching-lsc { + base switching-capabilities; + description + "Lambda-Switch Capable (LSC)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-fsc { + base switching-capabilities; + description + "Fiber-Switch Capable (FSC)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-types { + description + "Base identity for encoding types."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-packet { + base lsp-encoding-types; + description + "Packet LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-ethernet { + base lsp-encoding-types; + description + "Ethernet LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching 
(GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-pdh { + base lsp-encoding-types; + description + "ANSI/ETSI PDH LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-sdh { + base lsp-encoding-types; + description + "SDH ITU-T G.707 / SONET ANSI T1.105 LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-digital-wrapper { + base lsp-encoding-types; + description + "Digital Wrapper LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-lambda { + base lsp-encoding-types; + description + "Lambda (photonic) LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-fiber { + base lsp-encoding-types; + description + "Fiber LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-fiber-channel { + base lsp-encoding-types; + description + "FiberChannel LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-oduk { + base lsp-encoding-types; + description + "G.709 ODUk (Digital Path) LSP encoding."; + reference + "RFC 4328: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Extensions for G.709 Optical Transport + Networks Control"; + } + + identity lsp-encoding-optical-channel { + base lsp-encoding-types; + description + "G.709 Optical Channel LSP encoding."; + reference + "RFC 4328: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Extensions for G.709 Optical Transport + Networks Control"; + } + + identity lsp-encoding-line { + base lsp-encoding-types; + description + "Line (e.g., 8B/10B) LSP encoding."; + reference + "RFC 6004: Generalized MPLS (GMPLS) Support for Metro + Ethernet Forum and G.8011 Ethernet Service + Switching"; + } + + identity path-signaling-type { + description + "Base identity from which specific LSP path setup types + are derived."; + } + + identity path-setup-static { + base path-signaling-type; + description + "Static LSP provisioning path setup."; + } + + identity path-setup-rsvp { + base path-signaling-type; + description + "RSVP-TE signaling path setup."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + + identity path-setup-sr { + base path-signaling-type; + description + "Segment-routing path setup."; + } + + identity path-scope-type { + description + "Base identity from which specific path scope types are + derived."; + } + + identity path-scope-segment { + base path-scope-type; + description + "Path scope segment."; + reference + "RFC 4873: GMPLS Segment Recovery"; + } + + identity path-scope-end-to-end { + base path-scope-type; + description + "Path scope end to end."; + reference + "RFC 4873: GMPLS Segment Recovery"; + } + + identity route-usage-type { + description + "Base identity for route usage."; + } + + identity route-include-object { + base route-usage-type; + description + "'Include route' object."; + } + + identity route-exclude-object { + base route-usage-type; + description + "'Exclude route' object."; + reference + "RFC 4874: Exclude Routes - Extension to Resource 
ReserVation + Protocol-Traffic Engineering (RSVP-TE)"; + } + + identity route-exclude-srlg { + base route-usage-type; + description + "Excludes Shared Risk Link Groups (SRLGs)."; + reference + "RFC 4874: Exclude Routes - Extension to Resource ReserVation + Protocol-Traffic Engineering (RSVP-TE)"; + } + + identity path-metric-optimization-type { + description + "Base identity used to define the path metric optimization + types."; + } + + identity link-path-metric-type { + description + "Base identity used to define the link and the path metric + types. + + The unit of the path metric value is interpreted in the + context of the path metric type and the derived identities + SHOULD describe the unit of the path metric types they + define."; + } + + identity link-metric-type { + base link-path-metric-type; + description + "Base identity for the link metric types."; + } + + identity link-metric-te { + base link-metric-type; + description + "Traffic Engineering (TE) Link Metric."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2, Section 2.5.5 + RFC 5305: IS-IS Extensions for Traffic Engineering, + Section 3.7"; + } + + identity link-metric-igp { + base link-metric-type; + description + "Interior Gateway Protocol (IGP) Link Metric."; + reference + "RFC 3785: Use of Interior Gateway Protocol (IGP) Metric + as a second MPLS Traffic Engineering (TE) + Metric"; + } + + identity link-metric-delay-average { + base link-metric-type; + description + "Unidirectional Link Delay, measured in units of + microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.1 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.1"; + } + + identity link-metric-delay-minimum { + base link-metric-type; + description + "Minimum unidirectional Link Delay, measured in units of + microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.2 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.2"; + } + + identity link-metric-delay-maximum { + base link-metric-type; + description + "Maximum unidirectional Link Delay, measured in units of + microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.2 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.2"; + } + + identity link-metric-residual-bandwidth { + base link-metric-type; + description + "Unidirectional Residual Bandwidth, measured in units of + bytes per second. 
+ + It is defined to be Maximum Bandwidth minus the bandwidth + currently allocated to LSPs."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.5 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.5"; + } + + identity path-metric-type { + base link-path-metric-type; + base path-metric-optimization-type; + description + "Base identity for the path metric types."; + } + + identity path-metric-te { + base path-metric-type; + description + "Traffic Engineering (TE) Path Metric."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), Section 7.8"; + } + + identity path-metric-igp { + base path-metric-type; + description + "Interior Gateway Protocol (IGP) Path Metric."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), section 7.8"; + } + + identity path-metric-hop { + base path-metric-type; + description + "Hop Count Path Metric."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), Section 7.8"; + } + + identity path-metric-delay-average { + base path-metric-type; + description + "The Path Delay Metric, measured in units of + microseconds."; + reference + "RFC 8233: Extensions to the Path Computation Element + Communication Protocol (PCEP) to Compute + Service-Aware Label Switched Paths (LSPs), + Section 3.1.1"; + } + + identity path-metric-delay-minimum { + base path-metric-type; + description + "The Path Min Delay Metric, measured in units of + microseconds."; + reference + "I-D.ietf-pce-sid-algo: Carrying SR-Algorithm information + in PCE-based Networks, + draft-ietf-pce-sid-algo-14, + Sections 3.5.1 and 3.5.2"; + } + + identity path-metric-residual-bandwidth { + base path-metric-type; + description + "The Path Residual Bandwidth, defined as the minimum Link + Residual Bandwidth all the links along the path. 
+ + The Path Residual Bandwidth can be seen as the path + metric associated with the Maximum residual Bandwidth Path + (MBP) objective function."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity path-metric-optimize-includes { + base path-metric-optimization-type; + description + "A metric that optimizes the number of included resources + specified in a set."; + } + + identity path-metric-optimize-excludes { + base path-metric-optimization-type; + description + "A metric that optimizes to a maximum the number of excluded + resources specified in a set."; + } + + identity path-tiebreaker-type { + description + "Base identity for the path tiebreaker type."; + } + + identity path-tiebreaker-minfill { + base path-tiebreaker-type; + description + "Min-Fill LSP path placement: selects the path with the most + available bandwidth (load balance LSPs over more links)."; + } + + identity path-tiebreaker-maxfill { + base path-tiebreaker-type; + description + "Max-Fill LSP path placement: selects the path with the least + available bandwidth (packing more LSPs over few links)."; + } + + identity path-tiebreaker-random { + base path-tiebreaker-type; + description + "Random LSP path placement."; + } + + identity resource-affinities-type { + description + "Base identity for resource class affinities."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity resource-aff-include-all { + base resource-affinities-type; + description + "The set of attribute filters associated with a + tunnel, all of which must be present for a link + to be acceptable."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity resource-aff-include-any { + base resource-affinities-type; + description + "The set of attribute filters associated with a + tunnel, any of which must be present for a link + to be acceptable."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity resource-aff-exclude-any { + base resource-affinities-type; + description + "The set of attribute filters associated with a + tunnel, any of which renders a link unacceptable."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity te-optimization-criterion { + description + "Base identity for the TE optimization criteria."; + reference + "RFC 9522: Overview and Principles of Internet Traffic + Engineering"; + } + + identity not-optimized { + base te-optimization-criterion; + description + "Optimization is not applied."; + } + + identity cost { + base te-optimization-criterion; + description + "Optimized on cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity delay { + base te-optimization-criterion; + description + "Optimized on delay."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity path-computation-srlg-type { + description + "Base identity for Shared Risk Link Group (SRLG) path + computation."; + } + + identity srlg-ignore { + base path-computation-srlg-type; + description + "Ignores Shared Risk Link Groups 
(SRLGs) in the path + computation."; + } + + identity srlg-strict { + base path-computation-srlg-type; + description + "Includes a strict Shared Risk Link Group (SRLG) check in + the path computation."; + } + + identity srlg-preferred { + base path-computation-srlg-type; + description + "Includes a preferred Shared Risk Link Group (SRLG) check in + the path computation."; + } + + identity srlg-weighted { + base path-computation-srlg-type; + description + "Includes a weighted Shared Risk Link Group (SRLG) check in + the path computation."; + } + + identity path-computation-error-reason { + description + "Base identity for path computation error reasons."; + } + + identity path-computation-error-path-not-found { + base path-computation-error-reason; + description + "Path computation has failed because of an unspecified + reason."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), Section 7.5"; + } + + identity path-computation-error-no-topology { + base path-computation-error-reason; + description + "Path computation has failed because there is no topology + with the provided topology-identifier."; + } + + identity path-computation-error-no-dependent-server { + base path-computation-error-reason; + description + "Path computation has failed because one or more dependent + path computation servers are unavailable. + + The dependent path computation server could be + a Backward-Recursive Path Computation (BRPC) downstream + PCE or a child PCE."; + reference + "RFC 5441: A Backward-Recursive PCE-Based Computation (BRPC) + Procedure to Compute Shortest Constrained + Inter-Domain Traffic Engineering Label Switched + Paths + RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture"; + } + + identity path-computation-error-pce-unavailable { + base path-computation-error-reason; + description + "Path computation has failed because PCE is not available. + + It corresponds to bit 31 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP) + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-inclusion-hop { + base path-computation-error-reason; + description + "Path computation has failed because there is no + node or link provided by one or more inclusion hops."; + } + + identity path-computation-error-destination-unknown-in-domain { + base path-computation-error-reason; + description + "Path computation has failed because the destination node is + unknown in indicated destination domain. + + It corresponds to bit 19 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-resource { + base path-computation-error-reason; + description + "Path computation has failed because there is no + available resource in one or more domains. 
+ + It corresponds to bit 20 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-child-pce-unresponsive { + base path-computation-error-no-dependent-server; + description + "Path computation has failed because child PCE is not + responsive. + + It corresponds to bit 21 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-destination-domain-unknown { + base path-computation-error-reason; + description + "Path computation has failed because the destination domain + was unknown. + + It corresponds to bit 22 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-p2mp { + base path-computation-error-reason; + description + "Path computation has failed because of P2MP reachability + problem. + + It corresponds to bit 24 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8306: Extensions to the Path Computation Element + Communication Protocol (PCEP) for + Point-to-Multipoint Traffic Engineering Label + Switched Paths + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-gco-migration { + base path-computation-error-reason; + description + "Path computation has failed because of no Global Concurrent + Optimization (GCO) migration path found. + + It corresponds to bit 26 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5557: Path Computation Element Communication Protocol + (PCEP) Requirements and Protocol Extensions in + Support of Global Concurrent Optimization + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-gco-solution { + base path-computation-error-reason; + description + "Path computation has failed because of no GCO solution + found. + + It corresponds to bit 25 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5557: Path Computation Element Communication Protocol + (PCEP) Requirements and Protocol Extensions in + Support of Global Concurrent Optimization + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-pks-expansion { + base path-computation-error-reason; + description + "Path computation has failed because of Path-Key Subobject + (PKS) expansion failure. 
+ + It corresponds to bit 27 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5520: Preserving Topology Confidentiality in + Inter-Domain Path Computation Using a + Path-Key-Based Mechanism + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-brpc-chain-unavailable { + base path-computation-error-no-dependent-server; + description + "Path computation has failed because PCE BRPC chain + unavailable. + + It corresponds to bit 28 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5441: A Backward-Recursive PCE-Based Computation (BRPC) + Procedure to Compute Shortest Constrained + Inter-Domain Traffic Engineering Label Switched + Paths + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-source-unknown { + base path-computation-error-reason; + description + "Path computation has failed because source node is + unknown. + + It corresponds to bit 29 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP); + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-destination-unknown { + base path-computation-error-reason; + description + "Path computation has failed because destination node is + unknown. + + It corresponds to bit 30 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP); + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity protocol-origin-type { + description + "Base identity for protocol origin type."; + } + + identity protocol-origin-api { + base protocol-origin-type; + description + "Protocol origin is via Application Programming Interface + (API)."; + } + + identity protocol-origin-pcep { + base protocol-origin-type; + description + "Protocol origin is Path Computation Engine Protocol + (PCEP)."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP)"; + } + + identity protocol-origin-bgp { + base protocol-origin-type; + description + "Protocol origin is Border Gateway Protocol (BGP)."; + reference + "RFC 9012: The BGP Tunnel Encapsulation Attribute"; + } + + identity svec-objective-function-type { + description + "Base identity for SVEC objective function type."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol (PCEP)"; + } + + identity svec-of-minimize-agg-bandwidth-consumption { + base svec-objective-function-type; + description + "Objective function for minimizing aggregate bandwidth + consumption (MBC)."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-of-minimize-load-most-loaded-link { + base svec-objective-function-type; + description + "Objective function for minimizing the load on the link that + is carrying the highest load (MLL)."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-of-minimize-cost-path-set { + base svec-objective-function-type; + description + "Objective function for minimizing the cost on a path set + (MCC)."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + 
} + + identity svec-of-minimize-common-transit-domain { + base svec-objective-function-type; + description + "Objective function for minimizing the number of common + transit domains (MCTD)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-of-minimize-shared-link { + base svec-objective-function-type; + description + "Objective function for minimizing the number of shared + links (MSL)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-of-minimize-shared-srlg { + base svec-objective-function-type; + description + "Objective function for minimizing the number of shared + Shared Risk Link Groups (SRLG) (MSS)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-of-minimize-shared-nodes { + base svec-objective-function-type; + description + "Objective function for minimizing the number of shared + nodes (MSN)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-metric-type { + description + "Base identity for SVEC metric type."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol (PCEP)"; + } + + identity svec-metric-cumulative-te { + base svec-metric-type; + description + "Cumulative TE cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-cumulative-igp { + base svec-metric-type; + description + "Cumulative IGP cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-cumulative-hop { + base svec-metric-type; + description + "Cumulative Hop path metric."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-aggregate-bandwidth-consumption { + base svec-metric-type; + description + "Aggregate bandwidth consumption."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-load-of-the-most-loaded-link { + base svec-metric-type; + description + "Load of the most loaded link."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + /* + * Typedefs + */ + + typedef admin-group { + type yang:hex-string { + /* 01:02:03:04 */ + length "1..11"; + } + description + "Administrative group / resource class / color representation + in 'hex-string' type. + + The most significant byte in the hex-string is the farthest + to the left in the byte sequence. 
+ + Leading zero bytes in the configured value may be omitted + for brevity."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering + RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + + typedef admin-groups { + type union { + type admin-group; + type extended-admin-group; + } + description + "Derived types for TE administrative groups."; + } + + typedef extended-admin-group { + type yang:hex-string; + description + "Extended administrative group / resource class / color + representation in 'hex-string' type. + + The most significant byte in the hex-string is the farthest + to the left in the byte sequence. + + Leading zero bytes in the configured value may be omitted + for brevity."; + reference + "RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + + typedef path-attribute-flags { + type union { + type identityref { + base session-attributes-flags; + } + type identityref { + base lsp-attributes-flags; + } + } + description + "Path attributes flags type."; + } + + typedef performance-metrics-normality { + type enumeration { + enum unknown { + value 0; + description + "Unknown."; + } + enum normal { + value 1; + description + "Normal. + + Indicates that the anomalous bit is not set."; + } + enum abnormal { + value 2; + description + "Abnormal. + + Indicates that the anomalous bit is set."; + } + } + description + "Indicates whether a performance metric is normal (anomalous + bit not set), abnormal (anomalous bit set), or unknown."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions"; + } + + typedef srlg { + type uint32; + description + "Shared Risk Link Group (SRLG) type."; + reference + "RFC 4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS) + RFC 5307: IS-IS Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS)"; + } + + typedef te-common-status { + type enumeration { + enum up { + description + "Enabled."; + } + enum down { + description + "Disabled."; + } + enum testing { + description + "In some test mode."; + } + enum preparing-maintenance { + description + "The resource is disabled in the control plane to prepare + for a graceful shutdown for maintenance purposes."; + reference + "RFC 5817: Graceful Shutdown in MPLS and Generalized MPLS + Traffic Engineering Networks"; + } + enum maintenance { + description + "The resource is disabled in the data plane for maintenance + purposes."; + } + enum unknown { + description + "Status is unknown."; + } + } + description + "Defines a type representing the common states of a TE + resource."; + } + + typedef te-bandwidth { + type string { + pattern '0[xX](0((\.0?)?[pP](\+)?0?|(\.0?))|' + + '1(\.([\da-fA-F]{0,5}[02468aAcCeE]?)?)?' + + '[pP](\+)?(12[0-7]|' + + '1[01]\d|0?\d?\d)?)|0[xX][\da-fA-F]{1,8}|\d+' + + '(,(0[xX](0((\.0?)?[pP](\+)?0?|(\.0?))|' + + '1(\.([\da-fA-F]{0,5}[02468aAcCeE]?)?)?' + + '[pP](\+)?(12[0-7]|' + + '1[01]\d|0?\d?\d)?)|0[xX][\da-fA-F]{1,8}|\d+))*'; + } + description + "This is the generic bandwidth type. 
+ + It is a string containing a list of numbers separated by + commas, where each of these numbers can be non-negative + decimal, hex integer, or hex float: + + (dec | hex | float)[*(','(dec | hex | float))] + + For the packet-switching type, the string encoding may follow + the type 'bandwidth-ieee-float32' as defined in RFC 8294 + (e.g., 0x1p10), where the units are in bytes per second. + + Canonically, the string is represented as all lowercase and in + hex, where the prefix '0x' precedes the hex number."; + reference + "RFC 8294: Common YANG Data Types for the Routing Area + ITU-T G.709: Interfaces for the optical transport network - + Edition 6.0 (06/2020)"; + } + + typedef te-ds-class { + type uint8 { + range "0..7"; + } + description + "The Differentiated Services Class-Type of traffic."; + reference + "RFC 4124: Protocol Extensions for Support of Diffserv-aware + MPLS Traffic Engineering, Section 4.3.1"; + } + + typedef te-global-id { + type uint32; + description + "An identifier to uniquely identify an operator, which can be + either a provider or a client. + + The definition of this type is taken from RFCs 6370 and 5003. + + This attribute type is used solely to provide a globally + unique context for TE topologies."; + reference + "RFC 5003: Attachment Individual Identifier (AII) Types for + Aggregation + RFC 6370: MPLS Transport Profile (MPLS-TP) Identifiers"; + } + + typedef te-hop-type { + type enumeration { + enum loose { + description + "A loose hop in an explicit path."; + } + enum strict { + description + "A strict hop in an explicit path."; + } + } + description + "Enumerated type for specifying loose or strict paths."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3.3"; + } + + typedef te-link-access-type { + type enumeration { + enum point-to-point { + description + "The link is point-to-point."; + } + enum multi-access { + description + "The link is multi-access, including broadcast and NBMA."; + } + } + description + "Defines a type representing the access type of a TE link."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + + typedef te-label-direction { + type enumeration { + enum forward { + description + "Label allocated for the forward LSP direction."; + } + enum reverse { + description + "Label allocated for the reverse LSP direction."; + } + } + description + "Enumerated type for specifying the forward or reverse + label."; + } + + typedef te-link-direction { + type enumeration { + enum incoming { + description + "The explicit route represents an incoming link on + a node."; + } + enum outgoing { + description + "The explicit route represents an outgoing link on + a node."; + } + } + description + "Enumerated type for specifying the direction of a link on + a node."; + } + + typedef te-metric { + type uint32; + description + "Traffic Engineering (TE) metric."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2, Section 2.5.5 + RFC 5305: IS-IS Extensions for Traffic Engineering, + Section 3.7"; + } + + typedef te-node-id { + type union { + type yang:dotted-quad; + type inet:ipv6-address-no-zone; + } + description + "A type representing the identifier for a node in a TE + topology. + + The identifier is represented either as 4 octets in + dotted-quad notation, or as 16 octets in full, mixed, + shortened, or shortened-mixed IPv6 address notation. 
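As an aside on the 'te-node-id' union just defined: the two accepted representations can be told apart mechanically. The following Python sketch is illustrative only and not part of the patch; the helper name and the simplified dotted-quad check are assumptions, and zone-qualified IPv6 addresses are rejected because the union uses 'inet:ipv6-address-no-zone'.

    import ipaddress
    import re

    # Simplified dotted-quad check; the YANG 'yang:dotted-quad' pattern is
    # stricter (e.g. it forbids leading zeros in an octet).
    _DOTTED_QUAD = re.compile(r'^\d{1,3}(\.\d{1,3}){3}$')

    def te_node_id_kind(value: str) -> str:
        """Return 'dotted-quad' or 'ipv6' for a te-node-id value, else raise."""
        if _DOTTED_QUAD.match(value) and all(0 <= int(o) <= 255 for o in value.split('.')):
            return 'dotted-quad'
        if '%' not in value:  # the union uses ipv6-address-no-zone
            try:
                ipaddress.IPv6Address(value)  # full, mixed, or shortened forms
                return 'ipv6'
            except ValueError:
                pass
        raise ValueError(f'not a valid te-node-id: {value!r}')

    # te_node_id_kind('10.0.0.1')    -> 'dotted-quad'
    # te_node_id_kind('2001:db8::1') -> 'ipv6'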
+ + This attribute MAY be mapped to the Router Address TLV + described in Section 2.4.1 of RFC 3630, the TE Router ID + described in Section 3 of RFC 6827, the Traffic Engineering + Router ID TLV described in Section 4.3 of RFC 5305, the TE + Router ID TLV described in Section 3.2.1 of RFC 6119, or the + IPv6 TE Router ID TLV described in Section 4.1 of RFC 6119. + + The reachability of such a TE node MAY be achieved by a + mechanism such as that described in Section 6.2 of RFC 6827."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2, Section 2.4.1 + RFC 5305: IS-IS Extensions for Traffic Engineering, + Section 4.3 + RFC 6119: IPv6 Traffic Engineering in IS-IS, Section 3.2.1 + RFC 6827: Automatically Switched Optical Network (ASON) + Routing for OSPFv2 Protocols, Section 3"; + } + + typedef te-oper-status { + type te-common-status; + description + "Defines a type representing the operational status of + a TE resource."; + } + + typedef te-admin-status { + type te-common-status; + description + "Defines a type representing the administrative status of + a TE resource."; + } + + typedef te-path-disjointness { + type bits { + bit node { + position 0; + description + "Node disjoint."; + } + bit link { + position 1; + description + "Link disjoint."; + } + bit srlg { + position 2; + description + "Shared Risk Link Group (SRLG) disjoint."; + } + } + description + "Type of the resource disjointness for a TE tunnel path."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + typedef te-recovery-status { + type enumeration { + enum normal { + description + "Both the recovery span and the working span are fully + allocated and active, data traffic is being + transported over (or selected from) the working + span, and no trigger events are reported."; + } + enum recovery-started { + description + "The recovery action has been started but not completed."; + } + enum recovery-succeeded { + description + "The recovery action has succeeded. 
+ + The working span has reported a failure/degrade condition, + and the user traffic is being transported (or selected) + on the recovery span."; + } + enum recovery-failed { + description + "The recovery action has failed."; + } + enum reversion-started { + description + "The reversion has started."; + } + enum reversion-succeeded { + description + "The reversion action has succeeded."; + } + enum reversion-failed { + description + "The reversion has failed."; + } + enum recovery-unavailable { + description + "The recovery is unavailable, as a result of either an + operator's lockout command or a failure condition + detected on the recovery span."; + } + enum recovery-admin { + description + "The operator has issued a command to switch the user + traffic to the recovery span."; + } + enum wait-to-restore { + description + "The recovery domain is recovering from a failure/degrade + condition on the working span that is being controlled by + the Wait-to-Restore (WTR) timer."; + } + } + description + "Defines the status of a recovery action."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + typedef te-template-name { + type string { + pattern '/?([a-zA-Z0-9\-_.]+)(/[a-zA-Z0-9\-_.]+)*'; + } + description + "A type for the name of a TE node template or TE link + template."; + } + + typedef te-topology-event-type { + type enumeration { + enum add { + value 0; + description + "A TE node or TE link has been added."; + } + enum remove { + value 1; + description + "A TE node or TE link has been removed."; + } + enum update { + value 2; + description + "A TE node or TE link has been updated."; + } + } + description + "TE event type for notifications."; + } + + typedef te-topology-id { + type union { + type string { + length "0"; + // empty string + } + type string { + pattern '([a-zA-Z0-9\-_.]+:)*' + + '/?([a-zA-Z0-9\-_.]+)(/[a-zA-Z0-9\-_.]+)*'; + } + } + description + "An identifier for a topology. + + It is optional to have one or more prefixes at the beginning, + separated by colons. + + The prefixes can be 'network-types' as defined in the + 'ietf-network' module in RFC 8345, to help the user better + understand the topology before further inquiry is made."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef te-tp-id { + type union { + type uint32; + // Unnumbered + type inet:ip-address; + // IPv4 or IPv6 address + } + description + "An identifier for a TE link endpoint on a node. 
+ + This attribute is mapped to a local or remote link identifier + as defined in RFCs 3630 and 5305."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + + typedef path-type { + type enumeration { + enum primary-path { + description + "Indicates that the TE path is a primary path."; + } + enum secondary-path { + description + "Indicates that the TE path is a secondary path."; + } + enum primary-reverse-path { + description + "Indicates that the TE path is a primary reverse path."; + } + enum secondary-reverse-path { + description + "Indicates that the TE path is a secondary reverse path."; + } + } + description + "The type of TE path, indicating whether a path is a primary, + or a reverse primary, or a secondary, or a reverse secondary + path."; + } + + /* + * TE bandwidth groupings + */ + + grouping te-bandwidth { + description + "This grouping defines the generic TE bandwidth. + + For some known data-plane technologies, specific modeling + structures are specified. + + The string-encoded 'te-bandwidth' type is used for + unspecified technologies. + + The modeling structure can be augmented later for other + technologies."; + container te-bandwidth { + description + "Container that specifies TE bandwidth. + + The choices can be augmented for specific data-plane + technologies."; + choice technology { + default "generic"; + description + "Data-plane technology type."; + case generic { + leaf generic { + type te-bandwidth; + description + "Bandwidth specified in a generic format."; + } + } + } + } + } + + /* + * TE label groupings + */ + + grouping te-label { + description + "This grouping defines the generic TE label. + + The modeling structure can be augmented for each technology. + + For unspecified technologies, 'rt-types:generalized-label' + is used."; + container te-label { + description + "Container that specifies the TE label. + + The choices can be augmented for specific data-plane + technologies."; + choice technology { + default "generic"; + description + "Data-plane technology type."; + case generic { + leaf generic { + type rt-types:generalized-label; + description + "TE label specified in a generic format."; + } + } + } + leaf direction { + type te-label-direction; + default "forward"; + description + "Label direction."; + } + } + } + + grouping te-topology-identifier { + description + "Augmentation for a TE topology."; + container te-topology-identifier { + description + "TE topology identifier container."; + leaf provider-id { + type te-global-id; + default "0"; + description + "An identifier to uniquely identify a provider. + If omitted, it assumes that the topology provider ID + value = 0 (the default)."; + } + leaf client-id { + type te-global-id; + default "0"; + description + "An identifier to uniquely identify a client. + If omitted, it assumes that the topology client ID + value = 0 (the default)."; + } + leaf topology-id { + type te-topology-id; + default ""; + description + "When the datastore contains several topologies, + 'topology-id' distinguishes between them. + + If omitted, the default (empty) string for this leaf is + assumed."; + } + } + } + + /* + * TE performance metrics groupings + */ + + grouping performance-metrics-one-way-delay-loss { + description + "Performance Metrics (PM) information in real time that can + be applicable to links or connections. 
+ + PM defined in this grouping are applicable to generic TE PM + as well as packet TE PM."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf one-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "One-way delay or latency."; + } + leaf one-way-delay-normality { + type te-types:performance-metrics-normality; + description + "One-way delay normality."; + } + } + + grouping performance-metrics-two-way-delay-loss { + description + "Performance Metrics (PM) information in real time that can be + applicable to links or connections. + + PM defined in this grouping are applicable to generic TE PM + as well as packet TE PM."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf two-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "Two-way delay or latency."; + } + leaf two-way-delay-normality { + type te-types:performance-metrics-normality; + description + "Two-way delay normality."; + } + } + + grouping performance-metrics-one-way-bandwidth { + description + "Performance Metrics (PM) information in real time that can be + applicable to links. + + PM defined in this grouping are applicable to generic TE PM + as well as packet TE PM."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf one-way-residual-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Residual bandwidth that subtracts tunnel reservations from + Maximum Bandwidth (or link capacity) (RFC 3630) and + provides an aggregated remainder across QoS classes."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + leaf one-way-residual-bandwidth-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Residual bandwidth normality."; + } + leaf one-way-available-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Available bandwidth that is defined to be residual + bandwidth minus the measured bandwidth used for the + actual forwarding of non-RSVP-TE LSP packets. + + For a bundled link, available bandwidth is defined to be + the sum of the component link available bandwidths."; + } + leaf one-way-available-bandwidth-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Available bandwidth normality."; + } + leaf one-way-utilized-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Bandwidth utilization that represents the actual + utilization of the link (i.e., as measured in the router). 
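The one-way bandwidth leaves above carry 'rt-types:bandwidth-ieee-float32' values, i.e. an IEEE 754 binary32 number rendered as a hexadecimal float string in bytes per second (default '0x0p0'). A rough Python sketch of producing such a string from a plain bytes-per-second figure follows; the function name is an assumption, and the trimming step only approximates the canonical form expected by the YANG pattern.

    import struct

    def to_bandwidth_ieee_float32(bytes_per_second: float) -> str:
        """Render a bytes-per-second value as an IEEE binary32 hex-float string."""
        # Round to the nearest binary32 value first; the YANG type only carries
        # single precision.
        f32 = struct.unpack('>f', struct.pack('>f', bytes_per_second))[0]
        if f32 == 0.0:
            return '0x0p0'
        mantissa, _, exponent = float.hex(f32).partition('p')
        # float.hex() prints double-precision padding; strip trailing zeros so
        # the result looks like the module's examples (e.g. '0x1p10').
        mantissa = mantissa.rstrip('0').rstrip('.')
        return mantissa + 'p' + exponent.lstrip('+')

    # to_bandwidth_ieee_float32(0.0)         -> '0x0p0'
    # to_bandwidth_ieee_float32(1024.0)      -> '0x1p10'
    # to_bandwidth_ieee_float32(125000000)   -> 1 Gbit/s expressed in bytes/s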
+ For a bundled link, bandwidth utilization is defined to + be the sum of the component link bandwidth utilizations."; + } + leaf one-way-utilized-bandwidth-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Bandwidth utilization normality."; + } + } + + grouping one-way-performance-metrics { + description + "One-way Performance Metrics (PM) throttle grouping."; + leaf one-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way delay or latency."; + } + leaf one-way-residual-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Residual bandwidth that subtracts tunnel reservations from + Maximum Bandwidth (or link capacity) (RFC 3630) and + provides an aggregated remainder across QoS classes."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + leaf one-way-available-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Available bandwidth that is defined to be residual + bandwidth minus the measured bandwidth used for the + actual forwarding of non-RSVP-TE LSP packets. + + For a bundled link, available bandwidth is defined to be + the sum of the component link available bandwidths."; + } + leaf one-way-utilized-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Bandwidth utilization that represents the actual + utilization of the link (i.e., as measured in the router). + For a bundled link, bandwidth utilization is defined to + be the sum of the component link bandwidth utilizations."; + } + } + + grouping two-way-performance-metrics { + description + "Two-way Performance Metrics (PM) throttle grouping."; + leaf two-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way delay or latency."; + } + } + + grouping performance-metrics-thresholds { + description + "Grouping for configurable thresholds for measured + attributes."; + uses one-way-performance-metrics; + uses two-way-performance-metrics; + } + + grouping performance-metrics-attributes { + description + "Contains Performance Metrics (PM) attributes."; + container performance-metrics-one-way { + description + "One-way link performance information in real time."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + uses performance-metrics-one-way-delay-loss; + uses performance-metrics-one-way-bandwidth; + } + container performance-metrics-two-way { + description + "Two-way link performance information in real time."; + reference + "RFC 6374: Packet Loss and Delay Measurement for MPLS + Networks"; + uses performance-metrics-two-way-delay-loss; + } + } + + grouping performance-metrics-throttle-container { + description + "Controls Performance Metrics (PM) throttling."; + container throttle { + must 'suppression-interval >= measure-interval' { + error-message "'suppression-interval' cannot be less than " + + "'measure-interval'."; + description + "Constraint on 'suppression-interval' and + 'measure-interval'."; + } + description + "Link performance information in real time."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric 
Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf one-way-delay-offset { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Offset value to be added to the measured delay value."; + } + leaf measure-interval { + type uint32; + units "seconds"; + default "30"; + description + "Interval to measure the extended metric values."; + } + leaf advertisement-interval { + type uint32; + units "seconds"; + default "0"; + description + "Interval to advertise the extended metric values."; + } + leaf suppression-interval { + type uint32 { + range "1..max"; + } + units "seconds"; + default "120"; + description + "Interval to suppress advertisement of the extended metric + values."; + reference + "RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 6"; + } + container threshold-out { + description + "If the measured parameter falls outside an upper bound + for all but the minimum-delay metric (or a lower bound + for the minimum-delay metric only) and the advertised + value is not already outside that bound, an 'anomalous' + announcement (anomalous bit set) will be triggered."; + uses performance-metrics-thresholds; + } + container threshold-in { + description + "If the measured parameter falls inside an upper bound + for all but the minimum-delay metric (or a lower bound + for the minimum-delay metric only) and the advertised + value is not already inside that bound, a 'normal' + announcement (anomalous bit cleared) will be triggered."; + uses performance-metrics-thresholds; + } + container threshold-accelerated-advertisement { + description + "When the difference between the last advertised value and + the current measured value exceeds this threshold, an + 'anomalous' announcement (anomalous bit set) will be + triggered."; + uses performance-metrics-thresholds; + } + } + } + + /* + * TE tunnel generic groupings + */ + + grouping explicit-route-hop { + description + "The explicit route entry grouping."; + choice type { + description + "The explicit route entry type."; + case numbered-node-hop { + container numbered-node-hop { + must 'node-id-uri or node-id' { + description + "At least one node identifier needs to be present."; + } + description + "Numbered node route hop."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3, EXPLICIT_ROUTE in RSVP-TE + RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + } + } + case numbered-link-hop { + container numbered-link-hop { + description + "Numbered link explicit route hop."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3, EXPLICIT_ROUTE in RSVP-TE + RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf link-tp-id { + type te-tp-id; + mandatory true; + description + "TE Link Termination Point (LTP) identifier."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + leaf direction { + type 
te-link-direction; + default "outgoing"; + description + "Link route object direction."; + } + } + } + case unnumbered-link-hop { + container unnumbered-link-hop { + must '(link-tp-id-uri or link-tp-id) and ' + + '(node-id-uri or node-id)' { + description + "At least one node identifier and at least one Link + Termination Point (LTP) identifier need to be + present."; + } + description + "Unnumbered link explicit route hop."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3, EXPLICIT_ROUTE in RSVP-TE + RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf link-tp-id-uri { + type nt:tp-id; + description + "Link Termination Point (LTP) identifier."; + } + leaf link-tp-id { + type te-tp-id; + description + "TE LTP identifier. + + The combination of the TE link ID and the TE node ID + is used to identify an unnumbered TE link."; + } + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + leaf direction { + type te-link-direction; + default "outgoing"; + description + "Link route object direction."; + } + } + } + case as-number { + container as-number-hop { + description + "AS explicit route hop."; + leaf as-number { + type inet:as-number; + mandatory true; + description + "The Autonomous System (AS) number."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + } + } + case label { + description + "The label explicit route hop type."; + container label-hop { + description + "Label hop type."; + uses te-label; + } + } + } + } + + grouping explicit-route-hop-with-srlg { + description + "Augments the explicit route entry grouping with Shared Risk + Link Group (SRLG) hop type."; + uses explicit-route-hop { + augment "type" { + description + "Augmentation for a generic explicit route for Shared + Risk Link Group (SRLG) inclusion or exclusion."; + case srlg { + description + "An Shared Risk Link Group (SRLG) value to be + included or excluded."; + container srlg { + description + "Shared Risk Link Group (SRLG) container."; + leaf srlg { + type uint32; + description + "Shared Risk Link Group (SRLG) value."; + } + } + } + } + } + } + + grouping record-route-state { + description + "The Record Route grouping."; + leaf index { + type uint32; + description + "Record Route hop index. + + The index is used to identify an entry in the list. 
+ + The order of entries is defined by the user without relying + on key values."; + } + choice type { + description + "The Record Route entry type."; + case numbered-node-hop { + description + "Numbered node route hop."; + container numbered-node-hop { + must 'node-id-uri or node-id' { + description + "At least one node identifier need to be present."; + } + description + "Numbered node route hop container."; + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + case numbered-link-hop { + description + "Numbered link route hop."; + container numbered-link-hop { + description + "Numbered link route hop container."; + leaf link-tp-id { + type te-tp-id; + mandatory true; + description + "Numbered TE LTP identifier."; + } + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + case unnumbered-link-hop { + description + "Unnumbered link route hop."; + container unnumbered-link-hop { + must '(link-tp-id-uri or link-tp-id) and ' + + '(node-id-uri or node-id)' { + description + "At least one node identifier and at least one Link + Termination Point (LTP) identifier need to be + present."; + } + description + "Unnumbered link Record Route hop."; + reference + "RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf link-tp-id-uri { + type nt:tp-id; + description + "Link Termination Point (LTP) identifier."; + } + leaf link-tp-id { + type te-tp-id; + description + "TE LTP identifier. 
+ + The combination of the TE link ID and the TE node ID + is used to identify an unnumbered TE link."; + } + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + case label { + description + "The label Record Route entry types."; + container label-hop { + description + "Label route hop type."; + uses te-label; + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + } + } + + grouping label-restriction-info { + description + "Label set item information."; + leaf restriction { + type enumeration { + enum inclusive { + description + "The label or label range is inclusive."; + } + enum exclusive { + description + "The label or label range is exclusive."; + } + } + default "inclusive"; + description + "Indicates whether the list item is inclusive or exclusive."; + } + leaf index { + type uint32; + description + "The index of the label restriction list entry."; + } + container label-start { + must "(not(../label-end/te-label/direction) and" + + " not(te-label/direction))" + + " or " + + "(../label-end/te-label/direction = te-label/direction)" + + " or " + + "(not(te-label/direction) and" + + " (../label-end/te-label/direction = 'forward'))" + + " or " + + "(not(../label-end/te-label/direction) and" + + " (te-label/direction = 'forward'))" { + error-message "'label-start' and 'label-end' must have the " + + "same direction."; + } + description + "This is the starting label if a label range is specified. + This is the label value if a single label is specified, + in which case the 'label-end' attribute is not set."; + uses te-label; + } + container label-end { + must "(not(../label-start/te-label/direction) and" + + " not(te-label/direction))" + + " or " + + "(../label-start/te-label/direction = te-label/direction)" + + " or " + + "(not(te-label/direction) and" + + " (../label-start/te-label/direction = 'forward'))" + + " or " + + "(not(../label-start/te-label/direction) and" + + " (te-label/direction = 'forward'))" { + error-message "'label-start' and 'label-end' must have the " + + "same direction."; + } + description + "This is the ending label if a label range is specified. + This attribute is not set if a single label is specified."; + uses te-label; + } + container label-step { + description + "The step increment between labels in the label range. + + The label start/end values MUST be consistent with the sign + of label step. 
+ + For example: + 'label-start' < 'label-end' enforces 'label-step' > 0 + 'label-start' > 'label-end' enforces 'label-step' < 0."; + choice technology { + default "generic"; + description + "Data-plane technology type."; + case generic { + leaf generic { + type int32; + default "1"; + description + "Label range step."; + } + } + } + } + leaf range-bitmap { + type yang:hex-string; + description + "When there are gaps between 'label-start' and 'label-end', + this attribute is used to specify the positions + of the used labels. + + This is represented in big endian as 'hex-string'. + + In case the restriction is 'inclusive', the bit-position is + set if the corresponding mapped label is available. + In this case, if the range-bitmap is not present, all the + labels in the range are available. + + In case the restriction is 'exclusive', the bit-position is + set if the corresponding mapped label is not available. + In this case, if the range-bitmap is not present, all the + labels in the range are not available. + + The most significant byte in the hex-string is the farthest + to the left in the byte sequence. + + Leading zero bytes in the configured value may be omitted + for brevity. + + Each bit position in the 'range-bitmap' 'hex-string' maps + to a label in the range derived from 'label-start'. + + For example, assuming that 'label-start' = 16000 and + 'range-bitmap' = 0x01000001, then: + - bit position (0) is set, and the corresponding mapped + label from the range is 16000 + (0 * 'label-step') or + 16000 for default 'label-step' = 1. + - bit position (24) is set, and the corresponding mapped + label from the range is 16000 + (24 * 'label-step') or + 16024 for default 'label-step' = 1."; + } + } + + grouping label-set-info { + description + "Grouping for the list of label restrictions specifying what + labels may or may not be used."; + container label-restrictions { + description + "The label restrictions container."; + list label-restriction { + key "index"; + description + "The absence of the label restrictions container implies + that all labels are acceptable; otherwise, only restricted + labels are available."; + reference + "RFC 7579: General Network Element Constraint Encoding + for GMPLS-Controlled Networks"; + uses label-restriction-info; + } + } + } + + grouping optimization-metric-entry { + description + "Optimization metrics configuration grouping."; + leaf metric-type { + type identityref { + base path-metric-optimization-type; + } + description + "Identifies the 'metric-type' that the path computation + process uses for optimization."; + } + leaf weight { + type uint8; + default "1"; + description + "TE path metric normalization weight."; + } + container explicit-route-exclude-objects { + when "../metric-type = " + + "'te-types:path-metric-optimize-excludes'"; + description + "Container for the 'exclude route' object list."; + uses path-route-exclude-objects; + } + container explicit-route-include-objects { + when "../metric-type = " + + "'te-types:path-metric-optimize-includes'"; + description + "Container for the 'include route' object list."; + uses path-route-include-objects; + } + } + + grouping common-constraints { + description + "Common constraints grouping that can be set on + a constraint set or directly on the tunnel."; + uses te-bandwidth { + description + "A requested bandwidth to use for path computation."; + } + leaf link-protection { + type identityref { + base link-protection-type; + } + default "te-types:link-protection-unprotected"; + description + 
"Link protection type required for the links included + in the computed path."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + leaf setup-priority { + type uint8 { + range "0..7"; + } + default "7"; + description + "TE LSP requested setup priority."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + leaf hold-priority { + type uint8 { + range "0..7"; + } + default "7"; + description + "TE LSP requested hold priority."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + leaf signaling-type { + type identityref { + base path-signaling-type; + } + default "te-types:path-setup-rsvp"; + description + "TE tunnel path signaling type."; + } + } + + grouping tunnel-constraints { + description + "Tunnel constraints grouping that can be set on + a constraint set or directly on the tunnel."; + leaf network-id { + type nw:network-id; + description + "The network topology identifier."; + } + uses te-topology-identifier; + uses common-constraints; + } + + grouping path-constraints-route-objects { + description + "List of route entries to be included or excluded when + performing the path computation."; + container explicit-route-objects { + description + "Container for the explicit route object lists."; + list route-object-exclude-always { + key "index"; + ordered-by user; + description + "List of route objects to always exclude from the path + computation."; + leaf index { + type uint32; + description + "Explicit Route Object index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop; + } + list route-object-include-exclude { + key "index"; + ordered-by user; + description + "List of route objects to include or exclude in the path + computation."; + leaf explicit-route-usage { + type identityref { + base route-usage-type; + } + default "te-types:route-include-object"; + description + "Indicates whether to include or exclude the + route object. + + The default is to include it."; + } + leaf index { + type uint32; + description + "Route object include-exclude index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop-with-srlg; + } + } + } + + grouping path-route-include-objects { + description + "List of route objects to be included when performing + the path computation."; + list route-object-include-object { + key "index"; + ordered-by user; + description + "List of Explicit Route Objects to be included in the + path computation."; + leaf index { + type uint32; + description + "Route object entry index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop; + } + } + + grouping path-route-exclude-objects { + description + "List of route objects to be excluded when performing + the path computation."; + list route-object-exclude-object { + key "index"; + ordered-by user; + description + "List of Explicit Route Objects to be excluded in the + path computation."; + leaf index { + type uint32; + description + "Route object entry index. + + The index is used to identify an entry in the list. 
+ + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop-with-srlg; + } + } + + grouping generic-path-metric-bounds { + description + "TE path metric bounds grouping."; + container path-metric-bounds { + description + "Top-level container for the list of path metric bounds."; + list path-metric-bound { + key "metric-type"; + description + "List of path metric bounds, which can apply to link and + path metrics. + + TE paths which have at least one path metric which + exceeds the specified bounds MUST NOT be selected. + + TE paths that traverse TE links which have at least one + link metric which exceeds the specified bounds MUST NOT + be selected."; + leaf metric-type { + type identityref { + base link-path-metric-type; + } + description + "Identifies an entry in the list of 'metric-type' items + bound for the TE path."; + } + leaf upper-bound { + type uint64; + default "0"; + description + "Upper bound on the specified 'metric-type'. + + A zero indicates an unbounded upper limit for the + specified 'metric-type'. + + The unit of is interpreted in the context of the + 'metric-type' identity."; + } + } + } + } + + grouping generic-path-optimization { + description + "TE generic path optimization grouping."; + container optimizations { + description + "The objective function container that includes + attributes to impose when computing a TE path."; + choice algorithm { + description + "Optimizations algorithm."; + case metric { + if-feature "path-optimization-metric"; + /* Optimize by metric */ + list optimization-metric { + key "metric-type"; + description + "TE path metric type."; + uses optimization-metric-entry; + } + /* Tiebreakers */ + container tiebreakers { + status deprecated; + description + "Container for the list of tiebreakers. + + This container has been deprecated by the tiebreaker + leaf."; + list tiebreaker { + key "tiebreaker-type"; + status deprecated; + description + "The list of tiebreaker criteria to apply on an + equally favored set of paths, in order to pick + the best."; + leaf tiebreaker-type { + type identityref { + base path-metric-type; + } + status deprecated; + description + "Identifies an entry in the list of tiebreakers."; + } + } + } + } + case objective-function { + if-feature "path-optimization-objective-function"; + /* Objective functions */ + container objective-function { + description + "The objective function container that includes + attributes to impose when computing a TE path."; + leaf objective-function-type { + type identityref { + base objective-function-type; + } + default "te-types:of-minimize-cost-path"; + description + "Objective function entry."; + } + } + } + } + } + leaf tiebreaker { + type identityref { + base path-tiebreaker-type; + } + default "te-types:path-tiebreaker-random"; + description + "The tiebreaker criteria to apply on an equally favored set + of paths, in order to pick the best."; + } + } + + grouping generic-path-affinities { + description + "Path affinities grouping."; + container path-affinities-values { + description + "Path affinities represented as values."; + list path-affinities-value { + key "usage"; + description + "List of named affinity constraints."; + leaf usage { + type identityref { + base resource-affinities-type; + } + description + "Identifies an entry in the list of value affinity + constraints."; + } + leaf value { + type admin-groups; + default ""; + description + "The affinity value. 
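To make the 'path-metric-bound' semantics above concrete: a path is acceptable only if none of its accumulated metrics exceed the corresponding 'upper-bound', and an upper bound of zero means that metric-type is unbounded. The Python sketch below is illustrative only; the function name and the example identity string are assumptions.

    from typing import Mapping

    def path_within_bounds(path_metrics: Mapping[str, int],
                           metric_bounds: Mapping[str, int]) -> bool:
        """True if no accumulated path metric exceeds its configured upper bound."""
        for metric_type, upper_bound in metric_bounds.items():
            if upper_bound == 0:
                # A zero upper bound means "unbounded" for this metric-type.
                continue
            if path_metrics.get(metric_type, 0) > upper_bound:
                return False
        return True

    # path_within_bounds({'te-types:path-metric-te': 120},
    #                    {'te-types:path-metric-te': 100})  -> False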
+ + The default is empty."; + } + } + } + container path-affinity-names { + description + "Path affinities represented as names."; + list path-affinity-name { + key "usage"; + description + "List of named affinity constraints."; + leaf usage { + type identityref { + base resource-affinities-type; + } + description + "Identifies an entry in the list of named affinity + constraints."; + } + list affinity-name { + key "name"; + description + "List of named affinities."; + leaf name { + type string; + description + "Identifies a named affinity entry."; + } + } + } + } + } + + grouping generic-path-srlgs { + description + "Path Shared Risk Link Group (SRLG) grouping."; + container path-srlgs-lists { + description + "Path Shared Risk Link Group (SRLG) properties container."; + list path-srlgs-list { + key "usage"; + description + "List of Shared Risk Link Group (SRLG) values to be + included or excluded."; + leaf usage { + type identityref { + base route-usage-type; + } + description + "Identifies an entry in a list of Shared Risk Link Groups + (SRLGs) to either include or exclude."; + } + leaf-list values { + type srlg; + description + "List of Shared Risk Link Group (SRLG) values."; + } + } + } + container path-srlgs-names { + description + "Container for the list of named Shared Risk Link Groups + (SRLGs)."; + list path-srlgs-name { + key "usage"; + description + "List of named Shared Risk Link Groups (SRLGs) to be + included or excluded."; + leaf usage { + type identityref { + base route-usage-type; + } + description + "Identifies an entry in a list of named Shared Risk Link + Groups (SRLGs) to either include or exclude."; + } + leaf-list names { + type string; + description + "List of named Shared Risk Link Groups (SRLGs)."; + } + } + } + } + + grouping generic-path-disjointness { + description + "Path disjointness grouping."; + leaf disjointness { + type te-path-disjointness; + description + "The type of resource disjointness. + When configured for a primary path, the disjointness level + applies to all secondary LSPs. 
+ + When configured for a secondary path, the disjointness + level overrides the level configured for the primary path."; + } + } + + grouping common-path-constraints-attributes { + description + "Common path constraints configuration grouping."; + uses common-constraints; + uses generic-path-metric-bounds; + uses generic-path-affinities; + uses generic-path-srlgs; + } + + grouping generic-path-constraints { + description + "Global named path constraints configuration grouping."; + container path-constraints { + description + "TE named path constraints container."; + uses common-path-constraints-attributes; + uses generic-path-disjointness; + } + } + + grouping generic-path-properties { + description + "TE generic path properties grouping."; + container path-properties { + config false; + description + "The TE path properties."; + list path-metric { + key "metric-type"; + description + "TE path metric type."; + leaf metric-type { + type identityref { + base path-metric-type; + } + description + "TE path metric type."; + } + leaf accumulative-value { + type uint64; + description + "TE path metric accumulative value."; + } + } + uses generic-path-affinities; + uses generic-path-srlgs; + container path-route-objects { + description + "Container for the list of route objects either returned by + the computation engine or actually used by an LSP."; + list path-route-object { + key "index"; + ordered-by user; + description + "List of route objects either returned by the computation + engine or actually used by an LSP."; + leaf index { + type uint32; + description + "Route object entry index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop; + } + } + } + } + + grouping encoding-and-switching-type { + description + "Common grouping to define the LSP encoding and + switching types"; + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "LSP encoding type."; + reference + "RFC 3945: Generalized Multi-Protocol Label Switching (GMPLS) + Architecture"; + } + leaf switching-type { + type identityref { + base te-types:switching-capabilities; + } + description + "LSP switching type."; + reference + "RFC 3945: Generalized Multi-Protocol Label Switching (GMPLS) + Architecture"; + } + } + + grouping te-generic-node-id { + description + "A reusable grouping for a TE generic node identifier."; + leaf id { + type union { + type te-node-id; + type inet:ip-address; + type nw:node-id; + } + description + "The identifier of the node. + + It can be represented as IP address or dotted quad address + or as an URI. 
+ + The type data node disambiguates the union type."; + } + leaf type { + type enumeration { + enum ip { + description + "IP address representation of the node identifier."; + } + enum te-id { + description + "TE identifier of the node"; + } + enum node-id { + description + "URI representation of the node identifier."; + } + } + description + "Type of node identifier representation."; + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang new file mode 100644 index 000000000..f788fa2ea --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang @@ -0,0 +1,595 @@ +module ietf-te-device { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-device"; + + /* Replace with IANA when assigned */ + + prefix te-dev; + + /* Import TE module */ + + import ietf-te { + prefix te; + reference + "RFCXXXX: A YANG Data Model for Traffic Engineering + Tunnels and Interfaces"; + } + + /* Import TE types */ + + import ietf-te-types { + prefix te-types; + reference + "draft-ietf-teas-rfc8776-update: Common YANG Data Types + for Traffic Engineering."; + } + import ietf-interfaces { + prefix if; + reference + "RFC8343: A YANG Data Model for Interface Management"; + } + import ietf-routing-types { + prefix rt-types; + reference + "RFC8294: Common YANG Data Types for the Routing Area"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Himanshu Shah + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + + + Editor: Oscar Gonzalez de Dios + "; + + description + "This module defines a data model for TE device configurations, + state, and RPCs. The model fully conforms to the + Network Management Datastore Architecture (NMDA). + + Copyright (c) 2023 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + // RFC Ed.: replace XXXX with actual RFC number and remove this + // note. + // RFC Ed.: update the date below with the date of RFC publication + // and remove this note. 
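Since this hunk vendors the draft-ietf-teas-yang-te-34 modules under src/tests/tools/mock_nce_t_ctrl/yang/, a quick syntactic check with pyang can catch transcription problems early. The snippet below is only a suggestion, not part of the patch; it assumes pyang is installed and that the imported modules (ietf-te, ietf-te-types, ietf-interfaces, ietf-routing-types) are available on the same search path.

    import pathlib
    import subprocess

    YANG_DIR = pathlib.Path(
        'src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34')

    def validate(module_file: str) -> None:
        # '--strict' turns on RFC-conformance checks; '-p' extends the module
        # search path so imports resolve against the vendored directory.
        subprocess.run(
            ['pyang', '--strict', '-p', str(YANG_DIR), str(YANG_DIR / module_file)],
            check=True,
        )

    if __name__ == '__main__':
        validate('ietf-te-device.yang')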
+ + revision 2024-02-02 { + description + "Initial revision for the TE device YANG module."; + reference + "RFCXXXX: A YANG Data Model for Traffic Engineering Tunnels + and Interfaces"; + } + + grouping lsp-device-timers { + description + "Device TE LSP timers configs."; + leaf lsp-install-interval { + type uint32; + units "seconds"; + description + "TE LSP installation delay time."; + } + leaf lsp-cleanup-interval { + type uint32; + units "seconds"; + description + "TE LSP cleanup delay time."; + } + leaf lsp-invalidation-interval { + type uint32; + units "seconds"; + description + "TE LSP path invalidation before taking action delay time."; + } + } + + grouping te-igp-flooding-bandwidth-config { + description + "Configurable items for igp flooding bandwidth + threshold configuration."; + leaf threshold-type { + type enumeration { + enum delta { + description + "'delta' indicates that the local + system should flood IGP updates when a + change in reserved bandwidth >= the specified + delta occurs on the interface."; + } + enum threshold-crossed { + description + "THRESHOLD-CROSSED indicates that + the local system should trigger an update (and + hence flood) the reserved bandwidth when the + reserved bandwidth changes such that it crosses, + or becomes equal to one of the threshold values."; + } + } + description + "The type of threshold that should be used to specify the + values at which bandwidth is flooded. 'delta' indicates that + the local system should flood IGP updates when a change in + reserved bandwidth >= the specified delta occurs on the + interface. Where 'threshold-crossed' is specified, the local + system should trigger an update (and hence flood) the + reserved bandwidth when the reserved bandwidth changes such + that it crosses, or becomes equal to one of the threshold + values."; + } + leaf delta-percentage { + when "../threshold-type = 'delta'" { + description + "The percentage delta can only be specified when the + threshold type is specified to be a percentage delta of + the reserved bandwidth."; + } + type rt-types:percentage; + description + "The percentage of the maximum-reservable-bandwidth + considered as the delta that results in an IGP update + being flooded."; + } + leaf threshold-specification { + when "../threshold-type = 'threshold-crossed'" { + description + "The selection of whether mirrored or separate threshold + values are to be used requires user specified thresholds + to be set."; + } + type enumeration { + enum mirrored-up-down { + description + "mirrored-up-down indicates that a single set of + threshold values should be used for both increasing + and decreasing bandwidth when determining whether + to trigger updated bandwidth values to be flooded + in the IGP TE extensions."; + } + enum separate-up-down { + description + "separate-up-down indicates that a separate + threshold values should be used for the increasing + and decreasing bandwidth when determining whether + to trigger updated bandwidth values to be flooded + in the IGP TE extensions."; + } + } + description + "This value specifies whether a single set of threshold + values should be used for both increasing and decreasing + bandwidth when determining whether to trigger updated + bandwidth values to be flooded in the IGP TE extensions. 
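The 'delta' and 'threshold-crossed' behaviours described above can be summarised in a few lines. The sketch below is illustrative only (the function name and argument layout are assumptions); percentages are taken relative to the interface's maximum reservable bandwidth, as the leaf descriptions state.

    from typing import Sequence

    def should_flood(old_reserved: float, new_reserved: float,
                     max_reservable: float, threshold_type: str,
                     delta_percentage: float = 0.0,
                     thresholds: Sequence[float] = ()) -> bool:
        """Decide whether a reserved-bandwidth change should trigger TE flooding."""
        if max_reservable <= 0.0:
            return False
        old_pct = old_reserved / max_reservable * 100.0
        new_pct = new_reserved / max_reservable * 100.0
        if threshold_type == 'delta':
            # Flood when the change in reserved bandwidth is >= the configured delta.
            return abs(new_pct - old_pct) >= delta_percentage
        if threshold_type == 'threshold-crossed':
            lo, hi = sorted((old_pct, new_pct))
            # Flood when the reserved bandwidth crosses, or becomes exactly equal
            # to, one of the configured thresholds.
            return any(lo < t <= hi or (t == new_pct and old_pct != new_pct)
                       for t in thresholds)
        return False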
+ 'mirrored-up-down' indicates that a single value (or set of + values) should be used for both increasing and decreasing + values, where 'separate-up-down' specifies that the + increasing and decreasing values will be separately + specified."; + } + leaf-list up-thresholds { + when "../threshold-type = 'threshold-crossed'" + + "and ../threshold-specification = 'separate-up-down'" { + description + "A list of up-thresholds can only be specified when the + bandwidth update is triggered based on crossing a + threshold and separate up and down thresholds are + required."; + } + type rt-types:percentage; + description + "The thresholds (expressed as a percentage of the maximum + reservable bandwidth) at which bandwidth updates are to be + triggered when the bandwidth is increasing."; + } + leaf-list down-thresholds { + when "../threshold-type = 'threshold-crossed'" + + "and ../threshold-specification = 'separate-up-down'" { + description + "A list of down-thresholds can only be specified when the + bandwidth update is triggered based on crossing a + threshold and separate up and down thresholds are + required."; + } + type rt-types:percentage; + description + "The thresholds (expressed as a percentage of the maximum + reservable bandwidth) at which bandwidth updates are to be + triggered when the bandwidth is decreasing."; + } + leaf-list up-down-thresholds { + when "../threshold-type = 'threshold-crossed'" + + "and ../threshold-specification = 'mirrored-up-down'" { + description + "A list of thresholds corresponding to both increasing + and decreasing bandwidths can be specified only when an + update is triggered based on crossing a threshold, and + the same up and down thresholds are required."; + } + type rt-types:percentage; + description + "The thresholds (expressed as a percentage of the maximum + reservable bandwidth of the interface) at which bandwidth + updates are flooded - used both when the bandwidth is + increasing and decreasing."; + } + } + + /** + * TE device augmentations + */ + augment "/te:te" { + description + "TE global container."; + /* TE Interface Configuration Data */ + container interfaces { + description + "Configuration data model for TE interfaces."; + uses te-igp-flooding-bandwidth-config; + list interface { + key "name"; + description + "The list of interfaces enabled for TE."; + leaf name { + type if:interface-ref; + description + "The reference to interface enabled for TE."; + } + /* TE interface parameters */ + leaf te-metric { + type te-types:te-metric; + description + "TE interface metric."; + } + choice admin-group-type { + description + "TE interface administrative groups + representation type."; + case value-admin-groups { + choice value-admin-group-type { + description + "choice of admin-groups."; + case admin-groups { + description + "Administrative group/Resource + class/Color."; + leaf admin-group { + type te-types:admin-group; + description + "TE interface administrative group."; + } + } + case extended-admin-groups { + if-feature "te-types:extended-admin-groups"; + description + "Extended administrative group/Resource + class/Color."; + leaf extended-admin-group { + type te-types:extended-admin-group; + description + "TE interface extended administrative group."; + } + } + } + } + case named-admin-groups { + list named-admin-groups { + if-feature "te-types:extended-admin-groups"; + if-feature "te-types:named-extended-admin-groups"; + key "named-admin-group"; + description + "A list of named admin-group entries."; + leaf named-admin-group { + type 
leafref { + path "../../../../te:globals/" + + "te:named-admin-groups/te:named-admin-group/" + + "te:name"; + } + description + "A named admin-group entry."; + } + } + } + } + choice srlg-type { + description + "Choice of SRLG configuration."; + case value-srlgs { + list values { + key "value"; + description + "List of SRLG values that + this link is part of."; + leaf value { + type uint32 { + range "0..4294967295"; + } + description + "Value of the SRLG"; + } + } + } + case named-srlgs { + list named-srlgs { + if-feature "te-types:named-srlg-groups"; + key "named-srlg"; + description + "A list of named SRLG entries."; + leaf named-srlg { + type leafref { + path "../../../../te:globals/" + + "te:named-srlgs/te:named-srlg/te:name"; + } + description + "A named SRLG entry."; + } + } + } + } + uses te-igp-flooding-bandwidth-config; + list switching-capabilities { + key "switching-capability"; + description + "List of interface capabilities for this interface."; + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching Capability for this interface."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by this interface."; + } + } + container te-advertisements-state { + config false; + description + "TE interface advertisements state container."; + leaf flood-interval { + type uint32; + description + "The periodic flooding interval."; + } + leaf last-flooded-time { + type uint32; + units "seconds"; + description + "Time elapsed since last flooding in seconds."; + } + leaf next-flooded-time { + type uint32; + units "seconds"; + description + "Time remained for next flooding in seconds."; + } + leaf last-flooded-trigger { + type enumeration { + enum link-up { + description + "Link-up flooding trigger."; + } + enum link-down { + description + "Link-down flooding trigger."; + } + enum threshold-up { + description + "Bandwidth reservation up threshold."; + } + enum threshold-down { + description + "Bandwidth reservation down threshold."; + } + enum bandwidth-change { + description + "Bandwidth capacity change."; + } + enum user-initiated { + description + "Initiated by user."; + } + enum srlg-change { + description + "SRLG property change."; + } + enum periodic-timer { + description + "Periodic timer expired."; + } + } + default "periodic-timer"; + description + "Trigger for the last flood."; + } + list advertised-level-areas { + key "level-area"; + description + "List of level-areas that the TE interface is + advertised in."; + leaf level-area { + type uint32; + description + "The IGP area or level where the TE interface link + state is advertised in."; + } + } + } + } + } + } + + /* TE globals device augmentation */ + + augment "/te:te/te:globals" { + description + "Global TE device specific configuration parameters."; + uses lsp-device-timers; + } + + /* TE tunnels device configuration augmentation */ + + augment "/te:te/te:tunnels/te:tunnel" { + description + "Tunnel device dependent augmentation."; + leaf path-invalidation-action { + type identityref { + base te-types:path-invalidation-action-type; + } + description + "Tunnel path invalidation action."; + } + uses lsp-device-timers; + } + + /* TE LSPs device state augmentation */ + + augment "/te:te/te:lsps/te:lsp" { + description + "TE LSP device dependent augmentation."; + container lsp-timers { + when "../te:origin-type = 'ingress'" { + description + "Applicable to ingress LSPs only."; + } + description + 
"Ingress LSP timers."; + leaf uptime { + type uint32; + units "seconds"; + description + "The LSP uptime."; + } + leaf time-to-install { + type uint32; + units "seconds"; + description + "The time remaining for a new LSP to be instantiated + in forwarding to carry traffic."; + } + leaf time-to-destroy { + type uint32; + units "seconds"; + description + "The time remaining for a existing LSP to be deleted + from forwarding."; + } + } + container downstream-info { + when "../te:origin-type != 'egress'" { + description + "Downstream information of the LSP."; + } + description + "downstream information."; + leaf nhop { + type te-types:te-tp-id; + description + "downstream next-hop address."; + } + leaf outgoing-interface { + type if:interface-ref; + description + "downstream interface."; + } + container neighbor { + uses te-types:te-generic-node-id; + description + "downstream neighbor address."; + } + leaf label { + type rt-types:generalized-label; + description + "downstream label."; + } + } + container upstream-info { + when "../te:origin-type != 'ingress'" { + description + "Upstream information of the LSP."; + } + description + "upstream information."; + leaf phop { + type te-types:te-tp-id; + description + "upstream next-hop or previous-hop address."; + } + container neighbor { + uses te-types:te-generic-node-id; + description + "upstream neighbor address."; + } + leaf label { + type rt-types:generalized-label; + description + "upstream label."; + } + } + } + + /* TE interfaces RPCs/execution Data */ + + rpc link-state-update { + description + "Triggers a link state update for the specific interface."; + input { + choice filter-type { + mandatory true; + description + "Filter choice."; + case match-all { + leaf all { + type empty; + mandatory true; + description + "Match all TE interfaces."; + } + } + case match-one-interface { + leaf interface { + type if:interface-ref; + description + "Match a specific TE interface."; + } + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang new file mode 100644 index 000000000..48b160305 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang @@ -0,0 +1,1516 @@ +module ietf-te { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te"; + + /* Replace with IANA when assigned */ + + prefix te; + + /* Import TE generic types */ + import ietf-te-types { + prefix te-types; + reference + "draft-ietf-teas-rfc8776-update: Common YANG Data Types + for Traffic Engineering."; + } + import ietf-yang-types { + prefix yang; + reference + "RFC6991: Common YANG Data Types."; + } + + import ietf-network { + prefix "nw"; + reference "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group."; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Himanshu Shah + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + + + Editor: Oscar Gonzalez de Dios + "; + + description + "YANG data module for TE configuration, state, and RPCs. + The model fully conforms to the Network Management + Datastore Architecture (NMDA). 
+ + Copyright (c) 2023 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + // RFC Ed.: replace XXXX with actual RFC number and remove this + // note. + // RFC Ed.: update the date below with the date of RFC publication + // and remove this note. + + revision 2024-02-02 { + description + "Initial revision for the TE generic YANG module."; + reference + "RFCXXXX: A YANG Data Model for Traffic Engineering Tunnels + and Interfaces."; + } + + typedef tunnel-ref { + type leafref { + path "/te:te/te:tunnels/te:tunnel/te:name"; + require-instance false; + } + description + "This type is used by data models that need to reference + configured TE tunnel."; + } + + /** + * TE tunnel generic groupings + */ + + grouping path-common-properties { + description + "Common path attributes."; + leaf name { + type string; + description + "TE path name."; + } + leaf path-computation-method { + type identityref { + base te-types:path-computation-method; + } + default "te-types:path-locally-computed"; + description + "The method used for computing the path, either + locally computed, queried from a server or not + computed at all (explicitly configured)."; + } + container path-computation-server { + when "derived-from-or-self(../path-computation-method, " + + "'te-types:path-externally-queried')" { + description + "The path-computation server when the path is + externally queried."; + } + uses te-types:te-generic-node-id; + description + "Address of the external path computation + server."; + } + leaf compute-only { + type empty; + description + "When present, the path is computed and updated whenever + the topology is updated. 
No resources are committed + or reserved in the network."; + } + leaf use-path-computation { + when "derived-from-or-self(../path-computation-method, " + + "'te-types:path-locally-computed')"; + type boolean; + default "true"; + description + "When 'true' indicates the path is dynamically computed + and/or validated against the Traffic-Engineering Database + (TED), and when 'false' indicates no path expansion or + validation against the TED is required."; + } + leaf lockdown { + type empty; + description + "When present, indicates no reoptimization to be attempted + for this path."; + } + leaf path-scope { + type identityref { + base te-types:path-scope-type; + } + default "te-types:path-scope-end-to-end"; + config false; + description + "Indicates whether the path is a segment or portion of + of the full path., or is the an end-to-end path for + the TE Tunnel."; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping path-compute-info { + description + "Attributes used for path computation request."; + uses tunnel-associations-properties; + uses te-types:generic-path-optimization; + leaf named-path-constraint { + if-feature "te-types:named-path-constraints"; + type leafref { + path "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:name"; + } + description + "Reference to a globally defined named path constraint set."; + } + uses path-constraints-common; + } + + /* This grouping is re-used in path-computation rpc */ + grouping path-forward-properties { + description + "The path preference."; + leaf preference { + type uint8 { + range "1..255"; + } + default "1"; + description + "Specifies a preference for this path. The lower the number + higher the preference."; + } + leaf co-routed { + when "/te:te/te:tunnels/te:tunnel/te:bidirectional = 'true'" { + description + "Applicable to bidirectional tunnels only."; + } + type boolean; + default "false"; + description + "Indicates whether the reverse path must to be co-routed + with the primary."; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping k-requested-paths { + description + "The k-shortest paths requests."; + leaf k-requested-paths { + type uint8; + default "1"; + description + "The number of k-shortest-paths requested from the path + computation server and returned sorted by its optimization + objective."; + } + } + + grouping path-state { + description + "TE per path state parameters."; + uses path-computation-response; + container lsp-provisioning-error-infos { + config false; + description + "LSP provisioning error information."; + list lsp-provisioning-error-info { + description + "List of LSP provisioning error info entries."; + leaf error-reason { + type identityref { + base te-types:lsp-provisioning-error-reason; + } + description + "LSP provision error type."; + } + leaf error-description { + type string; + description + "The textual representation of the error occurred during + path computation."; + } + leaf error-timestamp { + type yang:date-and-time; + description + "Timestamp of when the reported error occurred."; + } + leaf error-node-id { + type te-types:te-node-id; + description + "Node identifier of node where error occurred."; + } + leaf error-link-id { + type te-types:te-tp-id; + description + "Link ID where the error occurred."; + } + leaf lsp-id { + type uint16; + description + "The LSP-ID for which path computation was performed."; + } + } + } + container lsps { + config false; + description + "The TE LSPs container."; + list lsp { + key 
"node lsp-id"; + description + "List of LSPs associated with the tunnel."; + leaf tunnel-name { + type leafref { + path "/te:te/te:lsps/te:lsp/te:tunnel-name"; + } + description "TE tunnel name."; + } + leaf node { + type leafref { + path "/te:te/te:lsps/te:lsp[tunnel-name=" + + "current()/../te:tunnel-name][lsp-id=" + + "current()/../te:lsp-id]/te:node"; + } + description "The node where the LSP state resides on."; + } + leaf lsp-id { + type leafref { + path "/te:te/te:lsps/te:lsp[tunnel-name=" + + "current()/../tunnel-name]/te:lsp-id"; + } + description "The TE LSP identifier."; + } + } + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping path-computation-response { + description + "Attributes reported by path computation response."; + container computed-paths-properties { + config false; + description + "Computed path properties container."; + list computed-path-properties { + key "k-index"; + description + "List of computed paths."; + leaf k-index { + type uint8; + description + "The k-th path returned from the computation server. + A lower k value path is more optimal than higher k + value path(s)"; + } + uses te-types:generic-path-properties { + augment "path-properties" { + description + "additional path properties returned by path + computation."; + uses te-types:te-bandwidth; + leaf disjointness-type { + type te-types:te-path-disjointness; + config false; + description + "The type of resource disjointness. + When reported for a primary path, it represents the + minimum level of disjointness of all the secondary + paths. When reported for a secondary path, it + represents the disjointness of the secondary path."; + } + } + } + } + } + container computed-path-error-infos { + config false; + description + "Path computation information container."; + list computed-path-error-info { + description + "List of path computation info entries."; + leaf error-description { + type string; + description + "Textual representation of the error that occurred + during path computation."; + } + leaf error-timestamp { + type yang:date-and-time; + description + "Timestamp of last path computation attempt."; + } + leaf error-reason { + type identityref { + base te-types:path-computation-error-reason; + } + description + "Reason for the path computation error."; + } + } + } + } + + grouping protection-restoration-properties { + description + "Protection and restoration parameters."; + container protection { + description + "Protection parameters."; + leaf protection-type { + type identityref { + base te-types:lsp-protection-type; + } + default "te-types:lsp-protection-unprotected"; + description + "LSP protection type."; + } + leaf protection-reversion-disable { + type boolean; + default "false"; + description + "Disable protection reversion to working path."; + } + leaf hold-off-time { + type uint32; + units "milli-seconds"; + description + "The time between the declaration of an SF or SD condition + and the initialization of the protection switching + algorithm."; + reference + "RFC4427"; + } + leaf wait-to-revert { + type uint16; + units "seconds"; + description + "Time to wait before attempting LSP reversion."; + reference + "RFC4427"; + } + leaf aps-signal-id { + type uint8 { + range "1..255"; + } + default "1"; + description + "The APS signal number used to reference the traffic of + this tunnel. The default value for normal traffic is 1. + The default value for extra-traffic is 255. 
If not + specified, non-default values can be assigned by the + server, if and only if, the server controls both + endpoints."; + reference + "ITU_G.808.1"; + } + } + container restoration { + description + "Restoration parameters."; + leaf restoration-type { + type identityref { + base te-types:lsp-restoration-type; + } + description + "LSP restoration type."; + } + leaf restoration-scheme { + type identityref { + base te-types:restoration-scheme-type; + } + description + "LSP restoration scheme."; + } + leaf restoration-reversion-disable { + type boolean; + default "false"; + description + "Disable restoration reversion to working path."; + } + leaf hold-off-time { + type uint32; + units "milli-seconds"; + description + "The time between the declaration of an SF or SD condition + and the initialization of the protection switching + algorithm."; + reference + "RFC4427"; + } + leaf wait-to-restore { + type uint16; + units "seconds"; + description + "Time to wait before attempting LSP restoration."; + reference + "RFC4427"; + } + leaf wait-to-revert { + type uint16; + units "seconds"; + description + "Time to wait before attempting LSP reversion."; + reference + "RFC4427"; + } + } + } + + grouping tunnel-associations-properties { + description + "TE tunnel association grouping."; + container association-objects { + description + "TE tunnel associations."; + list association-object { + key "association-key"; + unique "type id source/id source/type"; + description + "List of association base objects."; + reference + "RFC4872"; + leaf association-key { + type string; + description + "Association key used to identify a specific + association in the list"; + } + leaf type { + type identityref { + base te-types:association-type; + } + description + "Association type."; + reference + "RFC4872"; + } + leaf id { + type uint16; + description + "Association identifier."; + reference + "RFC4872"; + } + container source { + uses te-types:te-generic-node-id; + description + "Association source."; + reference + "RFC4872"; + } + } + list association-object-extended { + key "association-key"; + unique + "type id source/id source/type global-source extended-id"; + description + "List of extended association objects."; + reference + "RFC6780"; + leaf association-key { + type string; + description + "Association key used to identify a specific + association in the list"; + } + leaf type { + type identityref { + base te-types:association-type; + } + description + "Association type."; + reference + "RFC4872, RFC6780"; + } + leaf id { + type uint16; + description + "Association identifier."; + reference + "RFC4872, RFC6780"; + } + container source { + uses te-types:te-generic-node-id; + description + "Association source."; + reference + "RFC4872, RFC6780"; + } + leaf global-source { + type uint32; + description + "Association global source."; + reference + "RFC6780"; + } + leaf extended-id { + type yang:hex-string; + description + "Association extended identifier."; + reference + "RFC6780"; + } + } + } + } + + grouping tunnel-end-point { + description + "Common grouping used to specify the tunnel source and + destination end-points."; + leaf node-id { + type nw:node-id; + description + "The TE tunnel end-point node identifier"; + } + leaf te-node-id { + type te-types:te-node-id; + description + "The TE tunnel end-point TE node identifier"; + } + leaf tunnel-tp-id { + when "../node-id or ../te-node-id" { + description + "The TE tunnel termination point identifier is local to + a node"; + } + type binary; + 
description + "The TE tunnel end-point TE tunnel termination point + identifier"; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping tunnel-common-attributes { + description + "Common grouping to define the TE tunnel parameters"; + container source { + description + "TE tunnel source end-point."; + uses tunnel-end-point; + } + container destination { + description + "TE tunnel destination end-point."; + uses tunnel-end-point; + } + leaf bidirectional { + type boolean; + default "false"; + description + "Indicates a bidirectional tunnel"; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping tunnel-hierarchy-properties { + description + "A grouping for TE tunnel hierarchy information."; + container hierarchy { + description + "Container for TE hierarchy related information."; + container dependency-tunnels { + description + "List of tunnels that this tunnel can be potentially + dependent on."; + list dependency-tunnel { + key "name"; + description + "A tunnel entry that this tunnel can potentially depend + on."; + leaf name { + type tunnel-ref; + description + "Dependency tunnel name. The tunnel may not have been + instantiated yet."; + } + uses te-types:encoding-and-switching-type; + } + } + container hierarchical-link { + description + "Identifies a hierarchical link (in client layer) + that this tunnel is associated with. By default, the + topology of the hierarchical link is the same topology of + the tunnel;"; + reference + "RFC4206"; + leaf enable { + type boolean; + default "false"; + description + "Enables the hierarchical link properties supported by + this tunnel"; + } + leaf local-node-id { + type nw:node-id; + description + "The local node identifier."; + } + leaf local-te-node-id { + type te-types:te-node-id; + description + "The local TE node identifier."; + } + leaf local-link-tp-id { + type nt:tp-id; + description + "The local link termination point identifier."; + reference + "RFC8345"; + } + leaf local-te-link-tp-id { + type te-types:te-tp-id; + description + "The local TE link termination point identifier."; + } + leaf remote-node-id { + type nw:node-id; + description + "The remote node identifier."; + } + leaf remote-link-tp-id { + type nt:tp-id; + description + "The remote link termination point identifier."; + reference + "RFC8345"; + } + leaf remote-te-link-tp-id { + type te-types:te-tp-id; + description + "The remote TE link termination point identifier."; + } + leaf remote-te-node-id { + type te-types:te-node-id; + description + "Remote TE node identifier."; + } + leaf link-id { + type nt:link-id; + config false; + description + "A network topology assigned identifier to the link"; + reference + "RFC8345"; + } + leaf network-id { + type nw:network-id; + description + "The network topology identifier where the hierarchical + link supported by this TE tunnel is instantiated."; + } + uses te-types:te-topology-identifier { + description + "The TE topology identifier where the hierarchical link + supported by this TE tunnel is instantiated."; + } + } + } + } + + grouping path-constraints-common { + description + "Global named path constraints configuration + grouping."; + uses te-types:common-path-constraints-attributes; + uses te-types:generic-path-disjointness; + uses te-types:path-constraints-route-objects; + container path-in-segment { + presence "The end-to-end tunnel starts in a previous domain; + this tunnel is a segment in the current domain."; + description + "If an end-to-end tunnel crosses multiple domains 
using + the same technology, some additional constraints have to be + taken in consideration in each domain. + This TE tunnel segment is stitched to the upstream TE tunnel + segment."; + uses te-types:label-set-info; + } + container path-out-segment { + presence + "The end-to-end tunnel is not terminated in this domain; + this tunnel is a segment in the current domain."; + description + "If an end-to-end tunnel crosses multiple domains using + the same technology, some additional constraints have to be + taken in consideration in each domain. + This TE tunnel segment is stitched to the downstream TE + tunnel segment."; + uses te-types:label-set-info; + } + } + + /** + * TE container + */ + + container te { + description + "TE global container."; + leaf enable { + type boolean; + description + "Enables the TE component features."; + } + + /* TE Global Data */ + container globals { + description + "Globals TE system-wide configuration data container."; + container named-admin-groups { + description + "TE named admin groups container."; + list named-admin-group { + if-feature "te-types:extended-admin-groups"; + if-feature "te-types:named-extended-admin-groups"; + key "name"; + description + "List of named TE admin-groups."; + leaf name { + type string; + description + "A string name that uniquely identifies a TE + interface named admin-group."; + } + leaf bit-position { + type uint32; + description + "Bit position representing the administrative group."; + reference + "RFC3209 and RFC7308"; + } + + } + } + container named-srlgs { + description + "TE named SRLGs container."; + list named-srlg { + if-feature "te-types:named-srlg-groups"; + key "name"; + description + "A list of named SRLG groups."; + leaf name { + type string; + description + "A string name that uniquely identifies a TE + interface named SRLG."; + } + leaf value { + type te-types:srlg; + description + "An SRLG value."; + } + leaf cost { + type uint32; + description + "SRLG associated cost. 
Used during path to append + the path cost when traversing a link with this SRLG."; + } + } + } + container named-path-constraints { + description + "TE named path constraints container."; + list named-path-constraint { + if-feature "te-types:named-path-constraints"; + key "name"; + leaf name { + type string; + description + "A string name that uniquely identifies a + path constraint set."; + } + uses path-constraints-common; + description + "A list of named path constraints."; + } + } + } + + /* TE Tunnel Data */ + container tunnels { + description + "Tunnels TE configuration data container."; + list tunnel { + key "name"; + description + "The list of TE tunnels."; + leaf name { + type string; + description + "TE tunnel name."; + } + leaf alias { + type string; + description + "An alternate name of the TE tunnel that can be modified + anytime during its lifetime."; + } + leaf identifier { + type uint32; + description + "TE tunnel Identifier."; + reference + "RFC3209"; + } + leaf color { + type uint32; + description "The color associated with the TE tunnel."; + reference "RFC9012"; + } + leaf description { + type string; + default "None"; + description + "Textual description for this TE tunnel."; + } + leaf admin-state { + type identityref { + base te-types:tunnel-admin-state-type; + } + default "te-types:tunnel-admin-state-up"; + description + "TE tunnel administrative state."; + } + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + config false; + description + "TE tunnel operational state."; + } + uses te-types:encoding-and-switching-type; + uses tunnel-common-attributes; + container controller { + description + "Contains tunnel data relevant to external controller(s). + This target node may be augmented by external module(s), + for example, to add data for PCEP initiated and/or + delegated tunnels."; + leaf protocol-origin { + type identityref { + base te-types:protocol-origin-type; + } + description + "The protocol origin for instantiating the tunnel."; + } + leaf controller-entity-id { + type string; + description + "An identifier unique within the scope of visibility + that associated with the entity that controls the + tunnel."; + reference "RFC8232"; + } + } + leaf reoptimize-timer { + type uint16; + units "seconds"; + description + "Frequency of reoptimization of a traffic engineered + LSP."; + } + uses tunnel-associations-properties; + uses protection-restoration-properties; + uses te-types:tunnel-constraints; + uses tunnel-hierarchy-properties; + container primary-paths { + description + "The set of primary paths."; + reference "RFC4872"; + list primary-path { + key "name"; + description + "List of primary paths for this tunnel."; + leaf active { + type boolean; + config false; + description + "Indicates an active path that + has been selected from the primary paths list."; + } + uses path-common-properties; + uses path-forward-properties; + uses k-requested-paths; + uses path-compute-info; + uses path-state; + container primary-reverse-path { + when "../../../te:bidirectional = 'true'"; + description + "The reverse primary path properties."; + uses path-common-properties; + uses path-compute-info; + uses path-state; + container candidate-secondary-reverse-paths { + description + "The set of referenced candidate reverse secondary + paths from the full set of secondary reverse paths + which may be used for this primary path."; + list candidate-secondary-reverse-path { + key "secondary-reverse-path"; + ordered-by user; + description + "List of 
candidate secondary reverse path(s)"; + leaf secondary-reverse-path { + type leafref { + path "../../../../../../" + + "te:secondary-reverse-paths/" + + "te:secondary-reverse-path/te:name"; + } + description + "A reference to the secondary reverse path that + may be utilized when the containing primary + reverse path is in use."; + } + leaf active { + type boolean; + config false; + description + "Indicates an active path that has been + selected from the secondary reverse paths + list."; + } + } + } + } + container candidate-secondary-paths { + description + "The set of candidate secondary paths which may be + used for this primary path. When secondary paths are + specified in the list the path of the secondary LSP + in use must be restricted to those paths + referenced. + The priority of the secondary paths is specified + within the list. Higher priority values are less + preferred - that is to say that a path with priority + 0 is the most preferred path. In the case that the + list is empty, any secondary path may be + utilised when the current primary path is in use."; + list candidate-secondary-path { + key "secondary-path"; + ordered-by user; + description + "List of candidate secondary paths for this + tunnel."; + leaf secondary-path { + type leafref { + path "../../../../../te:secondary-paths/" + + "te:secondary-path/te:name"; + } + description + "A reference to the secondary path that may be + utilised when the containing primary path is + in use."; + } + leaf active { + type boolean; + config false; + description + "Indicates an active path that has been selected + from the candidate secondary paths."; + } + } + } + } + } + container secondary-paths { + description + "The set of secondary paths."; + reference "RFC4872"; + list secondary-path { + key "name"; + description + "List of secondary paths for this tunnel."; + uses path-common-properties; + leaf preference { + type uint8 { + range "1..255"; + } + default "1"; + description + "Specifies a preference for this path. The lower the + number higher the preference."; + } + leaf secondary-reverse-path { + type leafref { + path "../../../" + + "te:secondary-reverse-paths/" + + "te:secondary-reverse-path/te:name"; + } + description + "A reference to the reverse secondary path when + co-routed with the secondary path."; + } + uses path-compute-info; + uses protection-restoration-properties; + uses path-state; + } + } + container secondary-reverse-paths { + description + "The set of secondary reverse paths."; + list secondary-reverse-path { + key "name"; + description + "List of secondary paths for this tunnel."; + uses path-common-properties; + leaf preference { + type uint8 { + range "1..255"; + } + default "1"; + description + "Specifies a preference for this path. The lower the + number higher the preference. 
Paths that have the + same preference will be activated together."; + } + uses path-compute-info; + uses protection-restoration-properties; + uses path-state; + } + } + action tunnel-action { + description + "Action commands to manipulate the TE tunnel state."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + input { + leaf action-type { + type identityref { + base te-types:tunnel-action-type; + } + description + "The action to be invoked on the TE tunnel."; + } + } + output { + leaf action-result { + type identityref { + base te-types:te-action-result; + } + description + "The result of the tunnel action operation."; + } + } + } + action protection-external-commands { + description + "Actions to manipulate the protection external + commands of the TE tunnel."; + reference + "RFC 4427: Recovery (Protection and Restoration) + Terminology for Generalized Multi-Protocol Label + Switching (GMPLS)"; + input { + leaf protection-external-command { + type identityref { + base te-types:protection-external-commands; + } + description + "Protection external command."; + } + leaf protection-group-ingress-node { + type boolean; + default "true"; + description + "When 'true', indicates that the action is + applied on ingress node. + By default, the action applies to the ingress node + only."; + } + leaf protection-group-egress-node { + type boolean; + default "false"; + description + "When set to 'true', indicates that the action is + applied on egress node. + By default, the action applies to the ingress node + only."; + } + leaf path-name { + type string; + description + "The name of the path that the external command + applies to."; + } + leaf path-type { + type te-types:path-type; + description + "The type of the path that the external command + applies to."; + } + leaf traffic-type { + type enumeration { + enum normal-traffic { + description + "The manual-switch or forced-switch command + applies to the normal traffic (this Tunnel)."; + } + enum null-traffic { + description + "The manual-switch or forced-switch command + applies to the null traffic."; + } + enum extra-traffic { + description + "The manual-switch or forced-switch command + applies to the extra traffic (the extra-traffic + Tunnel sharing protection bandwidth with this + Tunnel)."; + } + } + description + "Indicates whether the manual-switch or forced-switch + commands applies to the normal traffic, the null + traffic or the extra-traffic."; + reference + "RFC4427"; + } + leaf extra-traffic-tunnel-ref { + type tunnel-ref; + description + "In case there are multiple extra-traffic tunnels + sharing protection bandwidth with this Tunnel + (m:n protection), represents which extra-traffic + Tunnel the manual-switch or forced-switch to + extra-traffic command applies to."; + } + } + } + } + } + + /* TE LSPs Data */ + container lsps { + config false; + description + "TE LSPs state container."; + list lsp { + key "tunnel-name lsp-id node"; + unique "source destination tunnel-id lsp-id " + + "extended-tunnel-id"; + description + "List of LSPs associated with the tunnel."; + leaf tunnel-name { + type string; + description "The TE tunnel name."; + } + leaf lsp-id { + type uint16; + description + "Identifier used in the SENDER_TEMPLATE and the + FILTER_SPEC that can be changed to allow a sender to + share resources with itself."; + reference + "RFC3209"; + } + leaf node { + type te-types:te-node-id; + description + "The node where the TE LSP state resides on."; + } + leaf source { + type te-types:te-node-id; 
+ description + "Tunnel sender address extracted from + SENDER_TEMPLATE object."; + reference + "RFC3209"; + } + leaf destination { + type te-types:te-node-id; + description + "The tunnel endpoint address."; + reference + "RFC3209"; + } + leaf tunnel-id { + type uint16; + description + "The tunnel identifier that remains + constant over the life of the tunnel."; + reference + "RFC3209"; + } + leaf extended-tunnel-id { + type yang:dotted-quad; + description + "The LSP Extended Tunnel ID."; + reference + "RFC3209"; + } + leaf operational-state { + type identityref { + base te-types:lsp-state-type; + } + description + "The LSP operational state."; + } + leaf signaling-type { + type identityref { + base te-types:path-signaling-type; + } + description + "The signaling protocol used to set up this LSP."; + } + leaf origin-type { + type enumeration { + enum ingress { + description + "Origin ingress."; + } + enum egress { + description + "Origin egress."; + } + enum transit { + description + "Origin transit."; + } + } + description + "The origin of the LSP relative to the location of the + local switch in the path."; + } + leaf lsp-resource-status { + type enumeration { + enum primary { + description + "A primary LSP is a fully established LSP for which + the resource allocation has been committed at the + data plane."; + } + enum secondary { + description + "A secondary LSP is an LSP that has been provisioned + in the control plane only; e.g. resource allocation + has not been committed at the data plane."; + } + } + description + "LSP resource allocation state."; + reference + "RFC4872, section 4.2.1"; + } + leaf lockout-of-normal { + type boolean; + description + "When set to 'true', it represents a lockout of normal + traffic external command. When set to 'false', it + represents a clear lockout of normal traffic external + command. The lockout of normal traffic command applies + to this Tunnel."; + reference + "RFC4427"; + } + leaf freeze { + type boolean; + description + "When set to 'true', it represents a freeze external + command. When set to 'false', it represents a clear + freeze external command. The freeze command applies to + all the Tunnels which are sharing the protection + resources with this Tunnel."; + reference + "RFC4427"; + } + leaf lsp-protection-role { + type enumeration { + enum working { + description + "A working LSP must be a primary LSP whilst a + protecting LSP can be either a primary or a + secondary LSP. Also, known as protected LSPs when + working LSPs are associated with protecting LSPs."; + } + enum protecting { + description + "A secondary LSP is an LSP that has been provisioned + in the control plane only; e.g. resource allocation + has not been committed at the data plane."; + } + } + description + "LSP role type."; + reference + "RFC4872, section 4.2.1"; + } + leaf lsp-protection-state { + type identityref { + base te-types:lsp-protection-state; + } + config false; + description + "The reported protection state controlling which + tunnel is using the resources of the protecting LSP."; + } + leaf protection-group-ingress-node-id { + type te-types:te-node-id; + description + "Indicates the te-node-id of the protection group + ingress node when the APS state represents an external + command (LoP, SF, MS) applied to it or a WTR timer + running on it. If the external command is not applied to + the ingress node or the WTR timer is not running on it, + this attribute is not specified. 
A value 0.0.0.0 is used + when the te-node-id of the protection group ingress node + is unknown (e.g., because the ingress node is outside + the scope of control of the server)"; + } + leaf protection-group-egress-node-id { + type te-types:te-node-id; + description + "Indicates the te-node-id of the protection group egress + node when the APS state represents an external command + (LoP, SF, MS) applied to it or a WTR timer running on + it. If the external command is not applied to the + ingress node or the WTR timer is not running on it, this + attribute is not specified. A value 0.0.0.0 is used when + the te-node-id of the protection group ingress node is + unknown (e.g., because the ingress node is outside the + scope of control of the server)"; + } + container lsp-actual-route-information { + description + "RSVP recorded route object information."; + list lsp-actual-route-information { + when "../../origin-type = 'ingress'" { + description + "Applicable on ingress LSPs only."; + } + key "index"; + description + "Record route list entry."; + uses te-types:record-route-state; + } + } + } + } + } + + /* TE Tunnel RPCs/execution Data */ + + rpc tunnels-path-compute { + description + "This RPC is a generic API whose + input and output parameters are expected to be provided by + augments to this module."; + reference + "RFC 4655: A Path Computation Element (PCE)-Based + Architecture."; + input { + container path-compute-info { + /* + * An external path compute module may augment this + * target. + */ + description + "RPC input information."; + } + } + output { + container path-compute-result { + /* + * An external path compute module may augment this + * target. + */ + description + "RPC output information."; + } + } + } + + rpc tunnels-actions { + description + "RPC that manipulates the state of a TE tunnel."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + input { + container tunnel-info { + description + "TE tunnel information."; + choice filter-type { + mandatory true; + description + "Filter choice."; + case all-tunnels { + leaf all { + type empty; + mandatory true; + description + "When present, applies the action on all TE + tunnels."; + } + } + case one-tunnel { + leaf tunnel { + type tunnel-ref; + description + "Apply action on the specific TE tunnel."; + } + } + } + } + container action-info { + description + "TE tunnel action information."; + leaf action { + type identityref { + base te-types:tunnel-action-type; + } + description + "The action type."; + } + leaf disruptive { + when "derived-from-or-self(../action, " + + "'te-types:tunnel-action-reoptimize')"; + type empty; + description + "When present, specifies whether or not the + reoptimization + action is allowed to be disruptive."; + } + } + } + output { + leaf action-result { + type identityref { + base te-types:te-action-result; + } + description + "The result of the tunnel action operation."; + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang new file mode 100644 index 000000000..ba3820b72 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang @@ -0,0 +1,1361 @@ +module ietf-layer1-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-layer1-types"; + prefix "l1-types"; + + import ietf-routing-types { + prefix rt-types; + reference + "RFC 8294: Common YANG 
Data Types for the Routing Area"; + } + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: + WG List: + + Editor: Haomian Zheng + + + Editor: Italo Busi + "; + + description + "This module defines Layer 1 YANG types. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2024 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here."; + + revision "2024-02-22" { + description + "Initial Version"; + reference + "RFC XXXX: A YANG Data Model for Layer 1 Types"; + // RFC Editor: replace RFC XXXX with actual RFC number, + // update date information and remove this note. + } + + /* + * Identities + */ + + identity tributary-slot-granularity { + description + "Tributary Slot Granularity (TSG)."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity tsg-1.25G { + base tributary-slot-granularity; + description + "1.25G tributary slot granularity."; + } + + identity tsg-2.5G { + base tributary-slot-granularity; + description + "2.5G tributary slot granularity."; + } + + identity tsg-5G { + base tributary-slot-granularity; + description + "5G tributary slot granularity."; + } + + identity odu-type { + description + "Base identity from which specific Optical Data Unit (ODU) + type is derived."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU0 { + base odu-type; + description + "ODU0 type (1.24Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU1 { + base odu-type; + description + "ODU1 type (2.49Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU2 { + base odu-type; + description + "ODU2 type (10.03Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU2e { + base odu-type; + description + "ODU2e type (10.39Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU3 { + base odu-type; + description + "ODU3 type (40.31Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of 
Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU4 { + base odu-type; + description + "ODU4 type (104.79Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODUflex { + base odu-type; + description + "ODUflex type (flexible bit rate, not resizable). + + It could be used for any type of ODUflex, including + ODUflex(CBR), ODUflex(GFP), ODUflex(GFP,n,k), ODUflex(IMP,s), + ODUflex(IMP) and ODUflex(FlexE-aware)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + identity ODUflex-resizable { + base odu-type; + description + "ODUflex protocol (flexible bit rate, resizable). + + It could be used only for ODUflex(GFP,n,k)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity protocol { + description + "Base identity from which specific protocol is derived."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity Ethernet { + base protocol; + description + "Ethernet protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity Fibre-Channel { + base protocol; + description + "Fibre-Channel (FC) protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity SDH { + base protocol; + description + "SDH protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity SONET { + base protocol; + description + "SONET protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity client-signal { + description + "Base identity from which specific Constant Bit Rate (CBR) + client signal is derived"; + } + + identity coding-func { + description + "Base identity from which specific coding function + is derived."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-1Gb { + base client-signal; + description + "Client signal type of 1GbE."; + reference + "IEEE 802.3-2018, Clause 36: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-10Gb-LAN { + base client-signal; + description + "Client signal type of ETH-10Gb-LAN (10.3 Gb/s)."; + reference + "IEEE 802.3-2018, Clause 49: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-10Gb-WAN { + base client-signal; + description + "Client signal type of ETH-10Gb-WAN (9.95 Gb/s)."; + reference + "IEEE 802.3-2018, Clause 50: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-40Gb { + base client-signal; + description + "Client signal type of 40GbE."; + reference + "IEEE 802.3-2018, Clause 82: IEEE 
Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-100Gb { + base client-signal; + description + "Client signal type of 100GbE."; + reference + "IEEE 802.3-2018, Clause 82: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity STM-1 { + base client-signal; + base coding-func; + description + "Client signal type of STM-1; + STM-1 G.707 (N=1) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-4 { + base client-signal; + base coding-func; + description + "Client signal type of STM-4; + STM-4 G.707 (N=4) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-16 { + base client-signal; + base coding-func; + description + "Client signal type of STM-16; + STM-16 G.707 (N=16) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-64 { + base client-signal; + base coding-func; + description + "Client signal type of STM-64; + STM-64 G.707 (N=64) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-256 { + base client-signal; + base coding-func; + description + "Client signal type of STM-256; + STM-256 G.707 (N=256) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-3 { + base client-signal; + base coding-func; + description + "Client signal type of OC3; + OC-3 GR-253-CORE (N=3) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-12 { + base client-signal; + base coding-func; + description + "Client signal type of OC12; + OC-12 GR-253-CORE 
(N=12) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-48 { + base client-signal; + base coding-func; + description + "Client signal type of OC48; + OC-48 GR-253-CORE (N=48) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-192 { + base client-signal; + base coding-func; + description + "Client signal type of OC192; + OC-192 GR-253-CORE (N=192) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-768 { + base client-signal; + base coding-func; + description + "Client signal type of OC768; + OC-768 GR-253-CORE (N=768) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-100 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-100; + FC-100 FC-FS-2 (1.0625 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-200 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-200; + FC-200 FC-FS-2 (2.125 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-400 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-400; + FC-400 FC-FS-2 (4.250 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-800 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-800; + FC-800 FC-FS-2 (8.500 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-1200 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre 
Channel FC-1200; + FC-1200 FC-10GFC (10.51875 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-1600 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-1600; + FC-1600 FC-FS-3 (14.025 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-3200 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-3200; + FC-3200 FC-FS-4 (28.05 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-1000X { + base coding-func; + description + "1000BASE-X PCS clause 36 coding function."; + reference + "IEEE 802.3-2018, Clause 36: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-10GW { + base coding-func; + description + "IEEE 802.3-2018, Clause 50: IEEE Standard for Ethernet + + 10GBASE-W (WAN PHY) PCS clause 49 and WIS clause 50 + coding function."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-10GR { + base coding-func; + description + "10GBASE-R (LAN PHY) PCS clause 49 coding function."; + reference + "IEEE 802.3-2018, Clause 49: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-40GR { + base coding-func; + description + "40GBASE-R PCS clause 82 coding function."; + reference + "IEEE 802.3-2018, Clause 82: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-100GR { + base coding-func; + description + "100GBASE-R PCS clause 82 coding function."; + reference + "IEEE 802.3-2018, Clause 82: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity optical-interface-func { + description + "Base identity from which optical-interface-function + is derived."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity SX-PMD-1000 { + base optical-interface-func; + description + "SX-PMD-clause-38 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 38: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LX-PMD-1000 { + base optical-interface-func; + description + "LX-PMD-clause-38 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 38: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LX10-PMD-1000 { + base optical-interface-func; + description + "LX10-PMD-clause-59 Optical 
Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 59: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity BX10-PMD-1000 { + base optical-interface-func; + description + "BX10-PMD-clause-59 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 59: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LW-PMD-10G { + base optical-interface-func; + description + "LW-PMD-clause-52 Optical Interface function for + 10GBASE-W PCS-49-WIS-50."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity EW-PMD-10G { + base optical-interface-func; + description + "EW-PMD-clause-52 Optical Interface function for + 10GBASE-W PCS-49-WIS-50."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LR-PMD-10G { + base optical-interface-func; + description + "LR-PMD-clause-52 Optical Interface function for + 10GBASE-R PCS-49."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ER-PMD-10G { + base optical-interface-func; + description + "ER-PMD-clause-52 Optical Interface function for + 10GBASE-R PCS-49."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LR4-PMD-40G { + base optical-interface-func; + description + "LR4-PMD-clause-87 Optical Interface function for + 40GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 87: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ER4-PMD-40G { + base optical-interface-func; + description + "ER4-PMD-clause-87 Optical Interface function for + 40GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 87: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FR-PMD-40G { + base optical-interface-func; + description + "FR-PMD-clause-89 Optical Interface function for + 40GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 89: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LR4-PMD-100G { + base optical-interface-func; + description + "LR4-PMD-clause-88 Optical Interface function for + 100GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 88: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + identity ER4-PMD-100G { + base optical-interface-func; + description + "ER4-PMD-clause-88 Optical Interface function for + 100GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 88: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + /* + * Typedefs + */ + + typedef otn-tpn { + type uint16 { + range "1..4095"; + } + description + "Tributary Port Number (TPN) for OTN."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks."; + } + + typedef otn-ts { + type uint16 { + range "1..4095"; + } + description + "Tributary Slot (TS) for OTN."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks."; + } + + typedef otn-label-range-type { + type enumeration { + enum trib-slot { + description + "Defines a range of OTN tributary slots (TS)."; + } + enum trib-port { + description + 
"Defines a range of OTN tributary ports (TPN)."; + } + } + description + "Defines the type of OTN label range: TS or TPN."; + } + + typedef gfp-k { + type enumeration { + enum 2 { + description + "The ODU2.ts rate (1,249,177.230 kbit/s) is used + to compute the rate of an ODUflex(GFP,n,2)."; + } + enum 3 { + description + "The ODU3.ts rate (1,254,470.354 kbit/s) is used + to compute the rate of an ODUflex(GFP,n,3)."; + } + enum 4 { + description + "The ODU4.ts rate (1,301,467.133 kbit/s) is used + to compute the rate of an ODUflex(GFP,n,4)."; + } + } + description + "The ODUk.ts used to compute the rate of an ODUflex(GFP,n,k)."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-8 and L.7: Interfaces for + the Optical Transport Network (OTN)"; + } + + typedef flexe-client-rate { + type union { + type uint16; + type enumeration { + enum "10G" { + description + "Represents a 10G FlexE Client signal (s=2)."; + } + enum "40G" { + description + "Represents a 40G FlexE Client signal (s=8)."; + } + } + } + description + "The FlexE Client signal rate (s x 5,156,250.000 kbit/s) + used to compute the rate of an ODUflex(IMP, s). + + Valid values for s are s=2 (10G), s=4 (40G) and + s=5 x n (n x 25G). + + In the first two cases an enumeration value + (either 10G or 40G) is used, while in the latter case + the value of n is used."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-2: Interfaces for the + Optical Transport Network (OTN)"; + } + + typedef odtu-flex-type { + type enumeration { + enum "2" { + description + "The ODTU2.ts ODTU type."; + } + enum "3" { + description + "The ODTU3.ts ODTU type."; + } + enum "4" { + description + "The ODTU4.ts ODTU type."; + } + enum "Cn" { + description + "The ODTUCn.ts ODTU type."; + } + } + description + "The type of Optical Data Tributary Unit (ODTU), + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by an ODUflex LSP, according to + the (19-1a) and (20-1a) formulas defined in G.709."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-7, clause 19.6 and + clause 20.5: Interfaces for the Optical Transport + Network (OTN)"; + } + + typedef bandwidth-scientific-notation { + type string { + pattern + '0(\.0?)?([eE](\+)?0?)?|' + + '[1-9](\.[0-9]{0,6})?[eE](\+)?(9[0-6]|[1-8][0-9]|0?[0-9])?'; + } + units "bps"; + description + "Bandwidth values, expressed using the scientific notation + in bits per second. + + The encoding format is the external decimal-significant + character sequences specified in IEEE 754 and ISO/IEC 9899:1999 + for 32-bit decimal floating-point numbers: + (-1)**(S) * 10**(Exponent) * (Significant), + where Significant uses 7 digits. + + An implementation for this representation MAY use decimal32 + or binary32. The range of the Exponent is from -95 to +96 + for decimal32, and from -38 to +38 for binary32. + As a bandwidth value, the format is restricted to be + normalized, non-negative, and non-fraction: + n.dddddde{+}dd, N.DDDDDDE{+}DD, 0e0 or 0E0, + where 'd' and 'D' are decimal digits; 'n' and 'N' are + non-zero decimal digits; 'e' and 'E' indicate a power of ten. 
+ Some examples are 0e0, 1e10, and 9.953e9."; + reference + "IEEE Std 754-2001: IEEE Standard for Floating-Point + Arithmetic + + ISO/IEC 9899:1999: Information technology - Programming + Languages - C"; + } + + /* + * Groupings + */ + + grouping otn-link-bandwidth { + description + "Bandwidth attributes for OTN links."; + container otn-bandwidth { + description + "Bandwidth attributes for OTN links."; + list odulist { + key "odu-type"; + description + "OTN bandwidth definition"; + leaf odu-type { + type identityref { + base odu-type; + } + description "ODU type"; + } + leaf number { + type uint16; + description "Number of ODUs."; + } + leaf ts-number { + when 'derived-from-or-self(../odu-type,"ODUflex") or + derived-from-or-self(../odu-type, + "ODUflex-resizable")' { + description + "Applicable when odu-type is ODUflex or + ODUflex-resizable."; + } + type uint16 { + range "1..4095"; + } + description + "The number of Tributary Slots (TS) that + could be used by all the ODUflex LSPs."; + } + } + } + } + + grouping otn-path-bandwidth { + description + "Bandwidth attributes for OTN paths."; + container otn-bandwidth { + description + "Bandwidth attributes for OTN paths."; + leaf odu-type { + type identityref { + base odu-type; + } + description "ODU type"; + } + choice oduflex-type { + when 'derived-from-or-self(./odu-type,"ODUflex") or + derived-from-or-self(./odu-type, + "ODUflex-resizable")' { + description + "Applicable when odu-type is ODUflex or + ODUflex-resizable."; + } + description + "Types of ODUflex used to compute the ODUflex + nominal bit rate."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-2: Interfaces for the + Optical Transport Network (OTN)"; + case generic { + leaf nominal-bit-rate { + type union { + type l1-types:bandwidth-scientific-notation; + type rt-types:bandwidth-ieee-float32; + } + mandatory true; + description + "Nominal ODUflex bit rate."; + } + } + case cbr { + leaf client-type { + type identityref { + base client-signal; + } + mandatory true; + description + "The type of Constant Bit Rate (CBR) client signal + of an ODUflex(CBR)."; + } + } + case gfp-n-k { + leaf gfp-n { + type uint8 { + range "1..80"; + } + mandatory true; + description + "The value of n for an ODUflex(GFP,n,k)."; + reference + "ITU-T G.709 v6.0 (06/2020), Tables 7-8 and L.7: + Interfaces for the Optical Transport Network (OTN)"; + } + leaf gfp-k { + type gfp-k; + description + "The value of k for an ODUflex(GFP,n,k). 
+ + If omitted, it is calculated from the value of gfp-n + as described in Table 7-8 of G.709."; + reference + "ITU-T G.709 v6.0 (06/2020), Tables 7-8 and L.7: + Interfaces for the Optical Transport Network (OTN)"; + } + } + case flexe-client { + leaf flexe-client { + type flexe-client-rate; + mandatory true; + description + "The rate of the FlexE-client for an ODUflex(IMP,s)."; + } + } + case flexe-aware { + leaf flexe-aware-n { + type uint16; + mandatory true; + description + "The rate of FlexE-aware client signal + for ODUflex(FlexE-aware)"; + } + } + case packet { + leaf opuflex-payload-rate { + type union { + type l1-types:bandwidth-scientific-notation; + type rt-types:bandwidth-ieee-float32; + } + mandatory true; + description + "Either the GFP-F encapsulated packet client nominal + bit rate for an ODUflex(GFP) or the 64b/66b encoded + packet client nominal bit rate for an ODUflex(IMP)."; + } + } + } + } + } + + grouping otn-max-path-bandwidth { + description + "Maximum bandwidth attributes for OTN paths."; + container otn-bandwidth { + description + "Maximum bandwidth attributes for OTN paths."; + leaf odu-type { + type identityref { + base odu-type; + } + description "ODU type."; + } + leaf max-ts-number { + when 'derived-from-or-self(../odu-type,"ODUflex") or + derived-from-or-self(../odu-type, + "ODUflex-resizable")' { + description + "Applicable when odu-type is ODUflex or + ODUflex-resizable."; + } + type uint16 { + range "1..4095"; + } + description + "The maximum number of Tributary Slots (TS) that could be + used by an ODUflex LSP."; + } + } + } + + grouping otn-label-range-info { + description + "Label range information for OTN. + + This grouping SHOULD be used together with the + otn-label-start-end and otn-label-step groupings to provide + OTN technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + container otn-label-range { + description + "Label range information for OTN."; + leaf range-type { + type otn-label-range-type; + description "The type of range (e.g., TPN or TS) + to which the label range applies"; + } + leaf tsg { + type identityref { + base tributary-slot-granularity; + } + description + "Tributary slot granularity (TSG) to which the label range + applies. + + This leaf MUST be present when the range-type is TS. + + This leaf MAY be omitted when mapping an ODUk over an OTUk + Link. In this case the range-type is tpn, with only one + entry (ODUk), and the tpn range has only one value (1)."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + leaf-list odu-type-list { + type identityref { + base odu-type; + } + description + "List of ODU types to which the label range applies. + + An Empty odu-type-list means that the label range + applies to all the supported ODU types."; + } + leaf priority { + type uint8 { + range 0..7; + } + description + "Priority in Interface Switching Capability + Descriptor (ISCD)."; + reference + "RFC4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS)"; + } + } + } + + grouping otn-label-start-end { + description + "The OTN label-start or label-end used to specify an OTN label + range. + + This grouping is dependent on the range-type defined in the + otn-label-range-info grouping. 
+ + This grouping SHOULD be used together with the + otn-label-range-info and otn-label-step groupings to provide + OTN technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + container otn-label { + description + "Label start or label end for OTN. + + It is either a TPN or a TS depending on the OTN label range + type specified in the 'range-type' leaf defined in the + otn-label-range-info grouping."; + leaf tpn { + when "../../../../otn-label-range/range-type = + 'trib-port'" { + description + "Valid only when range-type represented by + trib-port."; + } + type otn-tpn; + description + "Tributary Port Number (TPN)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + leaf ts { + when "../../../../otn-label-range/range-type = + 'trib-slot'" { + description + "Valid only when range-type represented by + trib-slot."; + } + type otn-ts; + description + "Tributary Slot (TS) number."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + } + } + + grouping otn-label-hop { + description "OTN Label"; + reference + "RFC7139, section 6: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + container otn-label { + description + "Label hop for OTN."; + leaf tpn { + type otn-tpn; + description + "Tributary Port Number (TPN)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + leaf tsg { + type identityref { + base tributary-slot-granularity; + } + description "Tributary Slot Granularity (TSG)."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + leaf ts-list { + type string { + pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" + + "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)"; + } + description + "A list of available Tributary Slots (TS) ranging + between 1 and 4095. If multiple values or + ranges are given, they all MUST be disjoint + and MUST be in ascending order. + For example 1-20,25,50-1000."; + reference + "RFC 7139: GMPLS Signaling Extensions for Control + of Evolving G.709 Optical Transport Networks"; + } + } + } + + grouping otn-label-step { + description + "Label step for OTN. + + This grouping is dependent on the range-type defined in the + otn-label-range-info grouping. + + This grouping SHOULD be used together with the + otn-label-range-info and otn-label-start-end groupings to + provide OTN technology-specific label information to the + models which use the label-restriction-info grouping defined + in the module ietf-te-types."; + container otn-label-step { + description + "Label step for OTN. 
+ + It is either a TPN or a TS depending on the OTN label range + type specified in the 'range-type' leaf defined in the + otn-label-range-info grouping."; + leaf tpn { + when "../../../otn-label-range/range-type = + 'trib-port'" { + description + "Valid only when range-type represented by + trib-port."; + } + type otn-tpn; + description + "Label step which represents possible increments for + Tributary Port Number (TPN)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + leaf ts { + when "../../../otn-label-range/range-type = + 'trib-slot'" { + description + "Valid only when range-type represented by + trib-slot"; + } + type otn-ts; + description + "Label step which represents possible increments for + Tributary Slot (TS) number."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-inet-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-inet-types.yang new file mode 100644 index 000000000..a1ef0dfaa --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-inet-types.yang @@ -0,0 +1,458 @@ +module ietf-inet-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types"; + prefix "inet"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: David Kessens + + + WG Chair: Juergen Schoenwaelder + + + Editor: Juergen Schoenwaelder + "; + + description + "This module contains a collection of generally useful derived + YANG data types for Internet addresses and related things. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. 
+ + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from . + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. + In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. 
+ + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. 
This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. 
Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. 
+ + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-yang-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-yang-types.yang new file mode 100644 index 000000000..f6624fed8 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc6991/ietf-yang-types.yang @@ -0,0 +1,474 @@ +module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: David Kessens + + + WG Chair: Juergen Schoenwaelder + + + Editor: Juergen Schoenwaelder + "; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. + + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. 
+ + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. 
The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. 
+ + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. + + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. 
The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. + The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + description + "A hexadecimal string with octets represented as hex digits + separated by colons. The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' 
(full stop) character."; + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/iana-routing-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/iana-routing-types.yang new file mode 100644 index 000000000..e57ebd239 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/iana-routing-types.yang @@ -0,0 +1,471 @@ +module iana-routing-types { + namespace "urn:ietf:params:xml:ns:yang:iana-routing-types"; + prefix iana-rt-types; + + organization + "IANA"; + contact + "Internet Assigned Numbers Authority + + Postal: ICANN + 12025 Waterfront Drive, Suite 300 + Los Angeles, CA 90094-2536 + United States of America + Tel: +1 310 301 5800 + "; + + description + "This module contains a collection of YANG data types + considered defined by IANA and used for routing + protocols. + + Copyright (c) 2017 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8294; see + the RFC itself for full legal notices."; + + revision 2017-12-04 { + description "Initial revision."; + reference + "RFC 8294: Common YANG Data Types for the Routing Area. + Section 4."; + } + + /*** Collection of IANA types related to routing ***/ + /*** IANA Address Family enumeration ***/ + + typedef address-family { + type enumeration { + enum ipv4 { + value 1; + description + "IPv4 Address Family."; + } + + enum ipv6 { + value 2; + description + "IPv6 Address Family."; + } + + enum nsap { + value 3; + description + "OSI Network Service Access Point (NSAP) Address Family."; + } + + enum hdlc { + value 4; + description + "High-Level Data Link Control (HDLC) Address Family."; + } + + enum bbn1822 { + value 5; + description + "Bolt, Beranek, and Newman Report 1822 (BBN 1822) + Address Family."; + } + + enum ieee802 { + value 6; + description + "IEEE 802 Committee Address Family + (aka Media Access Control (MAC) address)."; + } + + enum e163 { + value 7; + description + "ITU-T E.163 Address Family."; + } + enum e164 { + value 8; + description + "ITU-T E.164 (Switched Multimegabit Data Service (SMDS), + Frame Relay, ATM) Address Family."; + } + + enum f69 { + value 9; + description + "ITU-T F.69 (Telex) Address Family."; + } + + enum x121 { + value 10; + description + "ITU-T X.121 (X.25, Frame Relay) Address Family."; + } + + enum ipx { + value 11; + description + "Novell Internetwork Packet Exchange (IPX) + Address Family."; + } + + enum appletalk { + value 12; + description + "Apple AppleTalk Address Family."; + } + + enum decnet-iv { + value 13; + description + "Digital Equipment DECnet Phase IV Address Family."; + } + + enum vines { + value 14; + description + "Banyan Vines Address Family."; + } + + enum e164-nsap { + value 15; + description + "ITU-T E.164 with NSAP sub-address Address Family."; + } + + enum dns { + value 16; + description + "Domain Name System (DNS) Address Family."; + } + + enum distinguished-name { + value 17; + description + "Distinguished Name Address Family."; + } + + enum as-num { + value 18; + description + "Autonomous System (AS) Number Address Family."; + } + + enum xtp-v4 { + value 19; + description + "Xpress Transport Protocol (XTP) over IPv4 + Address 
Family."; + } + + enum xtp-v6 { + value 20; + description + "XTP over IPv6 Address Family."; + } + + enum xtp-native { + value 21; + description + "XTP native mode Address Family."; + } + + enum fc-port { + value 22; + description + "Fibre Channel (FC) World-Wide Port Name Address Family."; + } + enum fc-node { + value 23; + description + "FC World-Wide Node Name Address Family."; + } + + enum gwid { + value 24; + description + "ATM Gateway Identifier (GWID) Number Address Family."; + } + + enum l2vpn { + value 25; + description + "Layer 2 VPN (L2VPN) Address Family."; + } + + enum mpls-tp-section-eid { + value 26; + description + "MPLS Transport Profile (MPLS-TP) Section Endpoint + Identifier Address Family."; + } + + enum mpls-tp-lsp-eid { + value 27; + description + "MPLS-TP Label Switched Path (LSP) Endpoint Identifier + Address Family."; + } + + enum mpls-tp-pwe-eid { + value 28; + description + "MPLS-TP Pseudowire Endpoint Identifier Address Family."; + } + + enum mt-v4 { + value 29; + description + "Multi-Topology IPv4 Address Family."; + } + + enum mt-v6 { + value 30; + description + "Multi-Topology IPv6 Address Family."; + } + + enum eigrp-common-sf { + value 16384; + description + "Enhanced Interior Gateway Routing Protocol (EIGRP) + Common Service Family Address Family."; + } + + enum eigrp-v4-sf { + value 16385; + description + "EIGRP IPv4 Service Family Address Family."; + } + + enum eigrp-v6-sf { + value 16386; + description + "EIGRP IPv6 Service Family Address Family."; + } + + enum lcaf { + value 16387; + description + "Locator/ID Separation Protocol (LISP) + Canonical Address Format (LCAF) Address Family."; + } + + enum bgp-ls { + value 16388; + description + "Border Gateway Protocol - Link State (BGP-LS) + Address Family."; + } + + enum mac-48 { + value 16389; + description + "IEEE 48-bit MAC Address Family."; + } + + enum mac-64 { + value 16390; + description + "IEEE 64-bit MAC Address Family."; + } + + enum trill-oui { + value 16391; + description + "Transparent Interconnection of Lots of Links (TRILL) + IEEE Organizationally Unique Identifier (OUI) + Address Family."; + } + + enum trill-mac-24 { + value 16392; + description + "TRILL final 3 octets of 48-bit MAC Address Family."; + } + + enum trill-mac-40 { + value 16393; + description + "TRILL final 5 octets of 64-bit MAC Address Family."; + } + + enum ipv6-64 { + value 16394; + description + "First 8 octets (64 bits) of IPv6 address + Address Family."; + } + + enum trill-rbridge-port-id { + value 16395; + description + "TRILL Routing Bridge (RBridge) Port ID Address Family."; + } + + enum trill-nickname { + value 16396; + description + "TRILL Nickname Address Family."; + } + } + + description + "Enumeration containing all the IANA-defined + Address Families."; + + } + + /*** Subsequent Address Family Identifiers (SAFIs) ***/ + /*** for multiprotocol BGP enumeration ***/ + + typedef bgp-safi { + type enumeration { + enum unicast-safi { + value 1; + description + "Unicast SAFI."; + } + + enum multicast-safi { + value 2; + description + "Multicast SAFI."; + } + + enum labeled-unicast-safi { + value 4; + description + "Labeled Unicast SAFI."; + } + + enum multicast-vpn-safi { + value 5; + description + "Multicast VPN SAFI."; + } + + enum pseudowire-safi { + value 6; + description + "Multi-segment Pseudowire VPN SAFI."; + } + + enum tunnel-encap-safi { + value 7; + description + "Tunnel Encap SAFI."; + } + + enum mcast-vpls-safi { + value 8; + description + "Multicast Virtual Private LAN Service (VPLS) SAFI."; + } + + enum 
tunnel-safi { + value 64; + description + "Tunnel SAFI."; + } + + enum vpls-safi { + value 65; + description + "VPLS SAFI."; + } + + enum mdt-safi { + value 66; + description + "Multicast Distribution Tree (MDT) SAFI."; + } + + enum v4-over-v6-safi { + value 67; + description + "IPv4 over IPv6 SAFI."; + } + + enum v6-over-v4-safi { + value 68; + description + "IPv6 over IPv4 SAFI."; + } + + enum l1-vpn-auto-discovery-safi { + value 69; + description + "Layer 1 VPN Auto-Discovery SAFI."; + } + + enum evpn-safi { + value 70; + description + "Ethernet VPN (EVPN) SAFI."; + } + + enum bgp-ls-safi { + value 71; + description + "BGP-LS SAFI."; + } + + enum bgp-ls-vpn-safi { + value 72; + description + "BGP-LS VPN SAFI."; + } + + enum sr-te-safi { + value 73; + description + "Segment Routing - Traffic Engineering (SR-TE) SAFI."; + } + + enum labeled-vpn-safi { + value 128; + description + "MPLS Labeled VPN SAFI."; + } + + enum multicast-mpls-vpn-safi { + value 129; + description + "Multicast for BGP/MPLS IP VPN SAFI."; + } + + enum route-target-safi { + value 132; + description + "Route Target SAFI."; + } + + enum ipv4-flow-spec-safi { + value 133; + description + "IPv4 Flow Specification SAFI."; + } + + enum vpnv4-flow-spec-safi { + value 134; + description + "IPv4 VPN Flow Specification SAFI."; + } + + enum vpn-auto-discovery-safi { + value 140; + description + "VPN Auto-Discovery SAFI."; + } + } + description + "Enumeration for BGP SAFI."; + reference + "RFC 4760: Multiprotocol Extensions for BGP-4."; + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/ietf-routing-types.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/ietf-routing-types.yang new file mode 100644 index 000000000..65c83bc84 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8294/ietf-routing-types.yang @@ -0,0 +1,771 @@ +module ietf-routing-types { + namespace "urn:ietf:params:xml:ns:yang:ietf-routing-types"; + prefix rt-types; + + import ietf-yang-types { + prefix yang; + } + import ietf-inet-types { + prefix inet; + } + + organization + "IETF RTGWG - Routing Area Working Group"; + contact + "WG Web: + WG List: + + Editors: Xufeng Liu + + Yingzhen Qu + + Acee Lindem + + Christian Hopps + + Lou Berger + "; + + description + "This module contains a collection of YANG data types + considered generally useful for routing protocols. + + Copyright (c) 2017 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8294; see + the RFC itself for full legal notices."; + revision 2017-12-04 { + description "Initial revision."; + reference + "RFC 8294: Common YANG Data Types for the Routing Area. 
+ Section 3."; + } + + /*** Identities related to MPLS/GMPLS ***/ + + identity mpls-label-special-purpose-value { + description + "Base identity for deriving identities describing + special-purpose Multiprotocol Label Switching (MPLS) label + values."; + reference + "RFC 7274: Allocating and Retiring Special-Purpose MPLS + Labels."; + } + + identity ipv4-explicit-null-label { + base mpls-label-special-purpose-value; + description + "This identity represents the IPv4 Explicit NULL Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity router-alert-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Router Alert Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity ipv6-explicit-null-label { + base mpls-label-special-purpose-value; + description + "This identity represents the IPv6 Explicit NULL Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity implicit-null-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Implicit NULL Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity entropy-label-indicator { + base mpls-label-special-purpose-value; + description + "This identity represents the Entropy Label Indicator."; + reference + "RFC 6790: The Use of Entropy Labels in MPLS Forwarding. + Sections 3 and 10.1."; + } + + identity gal-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Generic Associated Channel + (G-ACh) Label (GAL)."; + reference + "RFC 5586: MPLS Generic Associated Channel. + Sections 4 and 10."; + } + + identity oam-alert-label { + base mpls-label-special-purpose-value; + description + "This identity represents the OAM Alert Label."; + reference + "RFC 3429: Assignment of the 'OAM Alert Label' for + Multiprotocol Label Switching Architecture (MPLS) + Operation and Maintenance (OAM) Functions. + Sections 3 and 6."; + } + + identity extension-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Extension Label."; + reference + "RFC 7274: Allocating and Retiring Special-Purpose MPLS + Labels. Sections 3.1 and 5."; + } + + /*** Collection of types related to routing ***/ + + typedef router-id { + type yang:dotted-quad; + description + "A 32-bit number in the dotted-quad format assigned to each + router. 
This number uniquely identifies the router within + an Autonomous System."; + } + + /*** Collection of types related to VPNs ***/ + + typedef route-target { + type string { + pattern + '(0:(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0):(429496729[0-5]|' + + '42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|429496[0-6][0-9]{3}|' + + '42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|429[0-3][0-9]{6}|' + + '42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0))|' + + '(1:((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|' + + '1[0-9]{2}|2[0-4][0-9]|25[0-5])):(6553[0-5]|' + + '655[0-2][0-9]|' + + '65[0-4][0-9]{2}|6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(2:(429496729[0-5]|42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|' + + '429496[0-6][0-9]{3}|42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|' + + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0):' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(6(:[a-fA-F0-9]{2}){6})|' + + '(([3-57-9a-fA-F]|[1-9a-fA-F][0-9a-fA-F]{1,3}):' + + '[0-9a-fA-F]{1,12})'; + } + + description + "A Route Target is an 8-octet BGP extended community + initially identifying a set of sites in a BGP VPN + (RFC 4364). However, it has since taken on a more general + role in BGP route filtering. A Route Target consists of two + or three fields: a 2-octet Type field, an administrator + field, and, optionally, an assigned number field. + + According to the data formats for types 0, 1, 2, and 6 as + defined in RFC 4360, RFC 5668, and RFC 7432, the encoding + pattern is defined as: + + 0:2-octet-asn:4-octet-number + 1:4-octet-ipv4addr:2-octet-number + 2:4-octet-asn:2-octet-number + 6:6-octet-mac-address + + Additionally, a generic pattern is defined for future + Route Target types: + + 2-octet-other-hex-number:6-octet-hex-number + + Some valid examples are 0:100:100, 1:1.1.1.1:100, + 2:1234567890:203, and 6:26:00:08:92:78:00."; + reference + "RFC 4360: BGP Extended Communities Attribute. + RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 5668: 4-Octet AS Specific BGP Extended Community. + RFC 7432: BGP MPLS-Based Ethernet VPN."; + } + + typedef ipv6-route-target { + type string { + pattern + '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + pattern '((([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + } + description + "An IPv6 Route Target is a 20-octet BGP IPv6 Address + Specific Extended Community serving the same function + as a standard 8-octet Route Target, except that it only + allows an IPv6 address as the global administrator. + The format is . 
+ + Two valid examples are 2001:db8::1:6544 and + 2001:db8::5eb1:791:6b37:17958."; + reference + "RFC 5701: IPv6 Address Specific BGP Extended Community + Attribute."; + } + + typedef route-target-type { + type enumeration { + enum import { + value 0; + description + "The Route Target applies to route import."; + } + enum export { + value 1; + description + "The Route Target applies to route export."; + } + + enum both { + value 2; + description + "The Route Target applies to both route import and + route export."; + } + } + description + "Indicates the role a Route Target takes in route filtering."; + reference + "RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs)."; + } + + typedef route-distinguisher { + type string { + pattern + '(0:(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0):(429496729[0-5]|' + + '42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|429496[0-6][0-9]{3}|' + + '42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|429[0-3][0-9]{6}|' + + '42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0))|' + + '(1:((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|' + + '1[0-9]{2}|2[0-4][0-9]|25[0-5])):(6553[0-5]|' + + '655[0-2][0-9]|' + + '65[0-4][0-9]{2}|6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(2:(429496729[0-5]|42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|' + + '429496[0-6][0-9]{3}|42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|' + + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0):' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(6(:[a-fA-F0-9]{2}){6})|' + + '(([3-57-9a-fA-F]|[1-9a-fA-F][0-9a-fA-F]{1,3}):' + + '[0-9a-fA-F]{1,12})'; + } + + description + "A Route Distinguisher is an 8-octet value used to + distinguish routes from different BGP VPNs (RFC 4364). + A Route Distinguisher will have the same format as a + Route Target as per RFC 4360 and will consist of + two or three fields: a 2-octet Type field, an administrator + field, and, optionally, an assigned number field. + + According to the data formats for types 0, 1, 2, and 6 as + defined in RFC 4360, RFC 5668, and RFC 7432, the encoding + pattern is defined as: + + 0:2-octet-asn:4-octet-number + 1:4-octet-ipv4addr:2-octet-number + 2:4-octet-asn:2-octet-number + 6:6-octet-mac-address + + Additionally, a generic pattern is defined for future + route discriminator types: + + 2-octet-other-hex-number:6-octet-hex-number + + Some valid examples are 0:100:100, 1:1.1.1.1:100, + 2:1234567890:203, and 6:26:00:08:92:78:00."; + reference + "RFC 4360: BGP Extended Communities Attribute. + RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 5668: 4-Octet AS Specific BGP Extended Community. 
+ RFC 7432: BGP MPLS-Based Ethernet VPN."; + } + + typedef route-origin { + type string { + pattern + '(0:(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0):(429496729[0-5]|' + + '42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|429496[0-6][0-9]{3}|' + + '42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|429[0-3][0-9]{6}|' + + '42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0))|' + + '(1:((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|' + + '1[0-9]{2}|2[0-4][0-9]|25[0-5])):(6553[0-5]|' + + '655[0-2][0-9]|' + + '65[0-4][0-9]{2}|6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(2:(429496729[0-5]|42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|' + + '429496[0-6][0-9]{3}|42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|' + + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0):' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(6(:[a-fA-F0-9]{2}){6})|' + + '(([3-57-9a-fA-F]|[1-9a-fA-F][0-9a-fA-F]{1,3}):' + + '[0-9a-fA-F]{1,12})'; + } + description + "A Route Origin is an 8-octet BGP extended community + identifying the set of sites where the BGP route + originated (RFC 4364). A Route Origin will have the same + format as a Route Target as per RFC 4360 and will consist + of two or three fields: a 2-octet Type field, an + administrator field, and, optionally, an assigned number + field. + + According to the data formats for types 0, 1, 2, and 6 as + defined in RFC 4360, RFC 5668, and RFC 7432, the encoding + pattern is defined as: + + 0:2-octet-asn:4-octet-number + 1:4-octet-ipv4addr:2-octet-number + 2:4-octet-asn:2-octet-number + 6:6-octet-mac-address + Additionally, a generic pattern is defined for future + Route Origin types: + + 2-octet-other-hex-number:6-octet-hex-number + + Some valid examples are 0:100:100, 1:1.1.1.1:100, + 2:1234567890:203, and 6:26:00:08:92:78:00."; + reference + "RFC 4360: BGP Extended Communities Attribute. + RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 5668: 4-Octet AS Specific BGP Extended Community. + RFC 7432: BGP MPLS-Based Ethernet VPN."; + } + + typedef ipv6-route-origin { + type string { + pattern + '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + pattern '((([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + } + description + "An IPv6 Route Origin is a 20-octet BGP IPv6 Address + Specific Extended Community serving the same function + as a standard 8-octet route, except that it only allows + an IPv6 address as the global administrator. The format + is . 
+ + Two valid examples are 2001:db8::1:6544 and + 2001:db8::5eb1:791:6b37:17958."; + reference + "RFC 5701: IPv6 Address Specific BGP Extended Community + Attribute."; + } + + /*** Collection of types common to multicast ***/ + + typedef ipv4-multicast-group-address { + type inet:ipv4-address { + pattern '(2((2[4-9])|(3[0-9]))\.).*'; + } + description + "This type represents an IPv4 multicast group address, + which is in the range of 224.0.0.0 to 239.255.255.255."; + reference + "RFC 1112: Host Extensions for IP Multicasting."; + } + + typedef ipv6-multicast-group-address { + type inet:ipv6-address { + pattern '(([fF]{2}[0-9a-fA-F]{2}):).*'; + } + description + "This type represents an IPv6 multicast group address, + which is in the range of ff00::/8."; + reference + "RFC 4291: IP Version 6 Addressing Architecture. Section 2.7. + RFC 7346: IPv6 Multicast Address Scopes."; + } + + typedef ip-multicast-group-address { + type union { + type ipv4-multicast-group-address; + type ipv6-multicast-group-address; + } + description + "This type represents a version-neutral IP multicast group + address. The format of the textual representation implies + the IP version."; + } + + typedef ipv4-multicast-source-address { + type union { + type enumeration { + enum * { + description + "Any source address."; + } + } + type inet:ipv4-address; + } + description + "Multicast source IPv4 address type."; + } + + typedef ipv6-multicast-source-address { + type union { + type enumeration { + enum * { + description + "Any source address."; + } + } + type inet:ipv6-address; + } + description + "Multicast source IPv6 address type."; + } + + /*** Collection of types common to protocols ***/ + + typedef bandwidth-ieee-float32 { + type string { + pattern + '0[xX](0((\.0?)?[pP](\+)?0?|(\.0?))|' + + '1(\.([0-9a-fA-F]{0,5}[02468aAcCeE]?)?)?[pP](\+)?(12[0-7]|' + + '1[01][0-9]|0?[0-9]?[0-9])?)'; + } + description + "Bandwidth in IEEE 754 floating-point 32-bit binary format: + (-1)**(S) * 2**(Exponent-127) * (1 + Fraction), + where Exponent uses 8 bits and Fraction uses 23 bits. + The units are octets per second. + The encoding format is the external hexadecimal-significant + character sequences specified in IEEE 754 and ISO/IEC C99. + The format is restricted to be normalized, non-negative, and + non-fraction: 0x1.hhhhhhp{+}d, 0X1.HHHHHHP{+}D, or 0x0p0, + where 'h' and 'H' are hexadecimal digits and 'd' and 'D' are + integers in the range of [0..127]. + When six hexadecimal digits are used for 'hhhhhh' or + 'HHHHHH', the least significant digit must be an even + number. 'x' and 'X' indicate hexadecimal; 'p' and 'P' + indicate a power of two. Some examples are 0x0p0, 0x1p10, + and 0x1.abcde2p+20."; + reference + "IEEE Std 754-2008: IEEE Standard for Floating-Point + Arithmetic. 
+ ISO/IEC C99: Information technology - Programming + Languages - C."; + } + + typedef link-access-type { + type enumeration { + enum broadcast { + description + "Specify broadcast multi-access network."; + } + enum non-broadcast-multiaccess { + description + "Specify Non-Broadcast Multi-Access (NBMA) network."; + } + enum point-to-multipoint { + description + "Specify point-to-multipoint network."; + } + enum point-to-point { + description + "Specify point-to-point network."; + } + } + description + "Link access type."; + } + + typedef timer-multiplier { + type uint8; + description + "The number of timer value intervals that should be + interpreted as a failure."; + } + + typedef timer-value-seconds16 { + type union { + type uint16 { + range "1..65535"; + } + type enumeration { + enum infinity { + description + "The timer is set to infinity."; + } + enum not-set { + description + "The timer is not set."; + } + } + } + units "seconds"; + description + "Timer value type, in seconds (16-bit range)."; + } + + typedef timer-value-seconds32 { + type union { + type uint32 { + range "1..4294967295"; + } + type enumeration { + enum infinity { + description + "The timer is set to infinity."; + } + enum not-set { + description + "The timer is not set."; + } + } + } + units "seconds"; + description + "Timer value type, in seconds (32-bit range)."; + } + + typedef timer-value-milliseconds { + type union { + type uint32 { + range "1..4294967295"; + } + type enumeration { + enum infinity { + description + "The timer is set to infinity."; + } + enum not-set { + description + "The timer is not set."; + } + } + } + units "milliseconds"; + description + "Timer value type, in milliseconds."; + } + + typedef percentage { + type uint8 { + range "0..100"; + } + description + "Integer indicating a percentage value."; + } + + typedef timeticks64 { + type uint64; + description + "This type is based on the timeticks type defined in + RFC 6991, but with 64-bit width. It represents the time, + modulo 2^64, in hundredths of a second between two epochs."; + reference + "RFC 6991: Common YANG Data Types."; + } + + typedef uint24 { + type uint32 { + range "0..16777215"; + } + description + "24-bit unsigned integer."; + } + + /*** Collection of types related to MPLS/GMPLS ***/ + + typedef generalized-label { + type binary; + description + "Generalized Label. Nodes sending and receiving the + Generalized Label are aware of the link-specific + label context and type."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description. Section 3.2."; + } + + typedef mpls-label-special-purpose { + type identityref { + base mpls-label-special-purpose-value; + } + description + "This type represents the special-purpose MPLS label values."; + reference + "RFC 3032: MPLS Label Stack Encoding. + RFC 7274: Allocating and Retiring Special-Purpose MPLS + Labels."; + } + + typedef mpls-label-general-use { + type uint32 { + range "16..1048575"; + } + description + "The 20-bit label value in an MPLS label stack as specified + in RFC 3032. This label value does not include the + encodings of Traffic Class and TTL (Time to Live). 
+ The label range specified by this type is for general use, + with special-purpose MPLS label values excluded."; + reference + "RFC 3032: MPLS Label Stack Encoding."; + } + + typedef mpls-label { + type union { + type mpls-label-special-purpose; + type mpls-label-general-use; + } + description + "The 20-bit label value in an MPLS label stack as specified + in RFC 3032. This label value does not include the + encodings of Traffic Class and TTL."; + reference + "RFC 3032: MPLS Label Stack Encoding."; + } + + /*** Groupings **/ + + grouping mpls-label-stack { + description + "This grouping specifies an MPLS label stack. The label + stack is encoded as a list of label stack entries. The + list key is an identifier that indicates the relative + ordering of each entry, with the lowest-value identifier + corresponding to the top of the label stack."; + container mpls-label-stack { + description + "Container for a list of MPLS label stack entries."; + list entry { + key "id"; + description + "List of MPLS label stack entries."; + leaf id { + type uint8; + description + "Identifies the entry in a sequence of MPLS label + stack entries. An entry with a smaller identifier + value precedes an entry with a larger identifier + value in the label stack. The value of this ID has + no semantic meaning other than relative ordering + and referencing the entry."; + } + leaf label { + type rt-types:mpls-label; + description + "Label value."; + } + + leaf ttl { + type uint8; + description + "Time to Live (TTL)."; + reference + "RFC 3032: MPLS Label Stack Encoding."; + } + leaf traffic-class { + type uint8 { + range "0..7"; + } + description + "Traffic Class (TC)."; + reference + "RFC 5462: Multiprotocol Label Switching (MPLS) Label + Stack Entry: 'EXP' Field Renamed to 'Traffic Class' + Field."; + } + } + } + } + + grouping vpn-route-targets { + description + "A grouping that specifies Route Target import-export rules + used in BGP-enabled VPNs."; + reference + "RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 4664: Framework for Layer 2 Virtual Private Networks + (L2VPNs)."; + list vpn-target { + key "route-target"; + description + "List of Route Targets."; + leaf route-target { + type rt-types:route-target; + description + "Route Target value."; + } + leaf route-target-type { + type rt-types:route-target-type; + mandatory true; + description + "Import/export type of the Route Target."; + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8343/ietf-interfaces.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8343/ietf-interfaces.yang new file mode 100644 index 000000000..96d416753 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8343/ietf-interfaces.yang @@ -0,0 +1,1123 @@ +module ietf-interfaces { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces"; + prefix if; + + import ietf-yang-types { + prefix yang; + } + + organization + "IETF NETMOD (Network Modeling) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Martin Bjorklund + "; + + description + "This module contains a collection of YANG definitions for + managing network interfaces. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8343; see + the RFC itself for full legal notices."; + + revision 2018-02-20 { + description + "Updated to support NMDA."; + reference + "RFC 8343: A YANG Data Model for Interface Management"; + } + + revision 2014-05-08 { + description + "Initial revision."; + reference + "RFC 7223: A YANG Data Model for Interface Management"; + } + + /* + * Typedefs + */ + + typedef interface-ref { + type leafref { + path "/if:interfaces/if:interface/if:name"; + } + description + "This type is used by data models that need to reference + interfaces."; + } + + /* + * Identities + */ + + identity interface-type { + description + "Base identity from which specific interface types are + derived."; + } + + /* + * Features + */ + + feature arbitrary-names { + description + "This feature indicates that the device allows user-controlled + interfaces to be named arbitrarily."; + } + feature pre-provisioning { + description + "This feature indicates that the device supports + pre-provisioning of interface configuration, i.e., it is + possible to configure an interface whose physical interface + hardware is not present on the device."; + } + feature if-mib { + description + "This feature indicates that the device implements + the IF-MIB."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + + /* + * Data nodes + */ + + container interfaces { + description + "Interface parameters."; + + list interface { + key "name"; + + description + "The list of interfaces on the device. + + The status of an interface is available in this list in the + operational state. If the configuration of a + system-controlled interface cannot be used by the system + (e.g., the interface hardware present does not match the + interface type), then the configuration is not applied to + the system-controlled interface shown in the operational + state. If the configuration of a user-controlled interface + cannot be used by the system, the configured interface is + not instantiated in the operational state. + + System-controlled interfaces created by the system are + always present in this list in the operational state, + whether or not they are configured."; + + leaf name { + type string; + description + "The name of the interface. + + A device MAY restrict the allowed values for this leaf, + possibly depending on the type of the interface. + For system-controlled interfaces, this leaf is the + device-specific name of the interface. + + If a client tries to create configuration for a + system-controlled interface that is not present in the + operational state, the server MAY reject the request if + the implementation does not support pre-provisioning of + interfaces or if the name refers to an interface that can + never exist in the system. A Network Configuration + Protocol (NETCONF) server MUST reply with an rpc-error + with the error-tag 'invalid-value' in this case. + + If the device supports pre-provisioning of interface + configuration, the 'pre-provisioning' feature is + advertised. + + If the device allows arbitrarily named user-controlled + interfaces, the 'arbitrary-names' feature is advertised. 
+ + When a configured user-controlled interface is created by + the system, it is instantiated with the same name in the + operational state. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf description { + type string; + description + "A textual description of the interface. + + A server implementation MAY map this leaf to the ifAlias + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifAlias. The definition of + such a mechanism is outside the scope of this document. + + Since ifAlias is defined to be stored in non-volatile + storage, the MIB implementation MUST map ifAlias to the + value of 'description' in the persistently stored + configuration."; + reference + "RFC 2863: The Interfaces Group MIB - ifAlias"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + description + "The type of the interface. + + When an interface entry is created, a server MAY + initialize the type leaf with a valid value, e.g., if it + is possible to derive the type from the name of the + interface. + + If a client tries to set the type of an interface to a + value that can never be used by the system, e.g., if the + type is not supported or if the type does not match the + name of the interface, the server MUST reject the request. + A NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf enabled { + type boolean; + default "true"; + description + "This leaf contains the configured, desired state of the + interface. + + Systems that implement the IF-MIB use the value of this + leaf in the intended configuration to set + IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry + has been initialized, as described in RFC 2863. + + Changes in this leaf in the intended configuration are + reflected in ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf link-up-down-trap-enable { + if-feature if-mib; + type enumeration { + enum enabled { + value 1; + description + "The device will generate linkUp/linkDown SNMP + notifications for this interface."; + } + enum disabled { + value 2; + description + "The device will not generate linkUp/linkDown SNMP + notifications for this interface."; + } + } + description + "Controls whether linkUp/linkDown SNMP notifications + should be generated for this interface. + + If this node is not configured, the value 'enabled' is + operationally used by the server for interfaces that do + not operate on top of any other interface (i.e., there are + no 'lower-layer-if' entries), and 'disabled' otherwise."; + reference + "RFC 2863: The Interfaces Group MIB - + ifLinkUpDownTrapEnable"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + config false; + mandatory true; + description + "The desired state of the interface. 
+ + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + config false; + mandatory true; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + config false; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + config false; + mandatory true; + description + "The ifIndex value for the ifEntry represented by this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + config false; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-ref; + config false; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-ref; + config false; + + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + config false; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + config false; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + + } + } + + /* + * Legacy typedefs + */ + + typedef interface-state-ref { + type leafref { + path "/if:interfaces-state/if:interface/if:name"; + } + status deprecated; + description + "This type is used by data models that need to reference + the operationally present interfaces."; + } + + /* + * Legacy operational state data nodes + */ + + container interfaces-state { + config false; + status deprecated; + description + "Data nodes for the operational state of interfaces."; + + list interface { + key "name"; + status deprecated; + + description + "The list of interfaces on the device. + + System-controlled interfaces created by the system are + always present in this list, whether or not they are + configured."; + + leaf name { + type string; + status deprecated; + description + "The name of the interface. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + status deprecated; + description + "The type of the interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + mandatory true; + status deprecated; + description + "The desired state of the interface. + + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. 
No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + mandatory true; + status deprecated; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + status deprecated; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + mandatory true; + status deprecated; + description + "The ifIndex value for the ifEntry represented by this + interface."; + + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + status deprecated; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + status deprecated; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + status deprecated; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + status deprecated; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + status deprecated; + + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + status deprecated; + + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + status deprecated; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. 
One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network-topology.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network-topology.yang new file mode 100644 index 000000000..df3685827 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network-topology.yang @@ -0,0 +1,294 @@ +module ietf-network-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix nt; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Alexander Clemm + + + Editor: Jan Medved + + + Editor: Robert Varga + + + Editor: Nitin Bahadur + + + Editor: Hariharan Ananthakrishnan + + + Editor: Xufeng Liu + "; + + description + "This module defines a common base model for a network topology, + augmenting the base network data model with links to connect + nodes, as well as termination points to terminate links + on nodes. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8345; + see the RFC itself for full legal notices."; + + revision 2018-02-26 { + description + "Initial revision."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. The precise + structure of the link-id will be up to the implementation. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the data model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier -- for example, to indicate the + type of link and/or the type of topology of which the link is + a part."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. The precise + structure of the tp-id will be up to the implementation. 
+ The identifier SHOULD be chosen such that the same termination + point in a real network topology will always be identified + through the same identifier, even if the data model is + instantiated in separate datastores. An implementation MAY + choose to capture semantics in the identifier -- for example, + to indicate the type of termination point and/or the type of + node that contains the termination point."; + } + + grouping link-ref { + description + "This grouping can be used to reference a link in a specific + network. Although it is not used in this module, it is + defined here for the convenience of augmenting modules."; + leaf link-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nt:link/nt:link-id"; + require-instance false; + } + description + "A type for an absolute reference to a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nw:network-ref; + } + + grouping tp-ref { + description + "This grouping can be used to reference a termination point + in a specific node. Although it is not used in this module, + it is defined here for the convenience of augmenting + modules."; + leaf tp-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nw:node[nw:node-id=current()/../"+ + "node-ref]/nt:termination-point/nt:tp-id"; + require-instance false; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nw:node-ref; + } + + augment "/nw:networks/nw:network" { + description + "Add links to the network data model."; + list link { + key "link-id"; + description + "A network link connects a local (source) node and + a remote (destination) node via a set of the respective + node's termination points. It is possible to have several + links between the same source and destination nodes. + Likewise, a link could potentially be re-homed between + termination points. Therefore, in order to ensure that we + would always know to distinguish between links, every link + is identified by a dedicated link identifier. Note that a + link models a point-to-point link, not a multipoint link."; + leaf link-id { + type link-id; + description + "The identifier of a link in the topology. + A link is specific to a topology to which it belongs."; + } + container source { + description + "This container holds the logical source of a particular + link."; + leaf source-node { + type leafref { + path "../../../nw:node/nw:node-id"; + require-instance false; + } + description + "Source node identifier. Must be in the same topology."; + } + leaf source-tp { + type leafref { + path "../../../nw:node[nw:node-id=current()/../"+ + "source-node]/termination-point/tp-id"; + require-instance false; + } + description + "This termination point is located within the source node + and terminates the link."; + } + } + + container destination { + description + "This container holds the logical destination of a + particular link."; + leaf dest-node { + type leafref { + path "../../../nw:node/nw:node-id"; + require-instance false; + } + description + "Destination node identifier. 
Must be in the same + network."; + } + leaf dest-tp { + type leafref { + path "../../../nw:node[nw:node-id=current()/../"+ + "dest-node]/termination-point/tp-id"; + require-instance false; + } + description + "This termination point is located within the + destination node and terminates the link."; + } + } + list supporting-link { + key "network-ref link-ref"; + description + "Identifies the link or links on which this link depends."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-network/nw:network-ref"; + require-instance false; + } + description + "This leaf identifies in which underlay topology + the supporting link is present."; + } + + leaf link-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/"+ + "../network-ref]/link/link-id"; + require-instance false; + } + description + "This leaf identifies a link that is a part + of this link's underlay. Reference loops in which + a link identifies itself as its underlay, either + directly or transitively, are not allowed."; + } + } + } + } + augment "/nw:networks/nw:network/nw:node" { + description + "Augments termination points that terminate links. + Termination points can ultimately be mapped to interfaces."; + list termination-point { + key "tp-id"; + description + "A termination point can terminate a link. + Depending on the type of topology, a termination point + could, for example, refer to a port or an interface."; + leaf tp-id { + type tp-id; + description + "Termination point identifier."; + } + list supporting-termination-point { + key "network-ref node-ref tp-ref"; + description + "This list identifies any termination points on which a + given termination point depends or onto which it maps. + Those termination points will themselves be contained + in a supporting node. This dependency information can be + inferred from the dependencies between links. Therefore, + this item is not separately configurable. Hence, no + corresponding constraint needs to be articulated. 
+ The corresponding information is simply provided by the + implementing system."; + + leaf network-ref { + type leafref { + path "../../../nw:supporting-node/nw:network-ref"; + require-instance false; + } + description + "This leaf identifies in which topology the + supporting termination point is present."; + } + leaf node-ref { + type leafref { + path "../../../nw:supporting-node/nw:node-ref"; + require-instance false; + } + description + "This leaf identifies in which node the supporting + termination point is present."; + } + leaf tp-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/"+ + "../network-ref]/nw:node[nw:node-id=current()/../"+ + "node-ref]/termination-point/tp-id"; + require-instance false; + } + description + "Reference to the underlay node (the underlay node must + be in a different topology)."; + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network.yang new file mode 100644 index 000000000..c67a3fa40 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8345/ietf-network.yang @@ -0,0 +1,192 @@ +module ietf-network { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix nw; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Alexander Clemm + + + Editor: Jan Medved + + + Editor: Robert Varga + + + Editor: Nitin Bahadur + + + Editor: Hariharan Ananthakrishnan + + + Editor: Xufeng Liu + "; + description + "This module defines a common base data model for a collection + of nodes in a network. Node definitions are further used + in network topologies and inventories. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8345; + see the RFC itself for full legal notices."; + + revision 2018-02-26 { + description + "Initial revision."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef node-id { + type inet:uri; + description + "Identifier for a node. The precise structure of the node-id + will be up to the implementation. For example, some + implementations MAY pick a URI that includes the network-id + as part of the path. The identifier SHOULD be chosen + such that the same node in a real network topology will + always be identified through the same identifier, even if + the data model is instantiated in separate datastores. An + implementation MAY choose to capture semantics in the + identifier -- for example, to indicate the type of node."; + } + + typedef network-id { + type inet:uri; + description + "Identifier for a network. The precise structure of the + network-id will be up to the implementation. The identifier + SHOULD be chosen such that the same network will always be + identified through the same identifier, even if the data model + is instantiated in separate datastores. 
An implementation MAY + choose to capture semantics in the identifier -- for example, + to indicate the type of network."; + } + + grouping network-ref { + description + "Contains the information necessary to reference a network -- + for example, an underlay network."; + leaf network-ref { + type leafref { + path "/nw:networks/nw:network/nw:network-id"; + require-instance false; + } + description + "Used to reference a network -- for example, an underlay + network."; + } + } + + grouping node-ref { + description + "Contains the information necessary to reference a node."; + leaf node-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nw:node/nw:node-id"; + require-instance false; + } + description + "Used to reference a node. + Nodes are identified relative to the network that + contains them."; + } + uses network-ref; + } + + container networks { + description + "Serves as a top-level container for a list of networks."; + list network { + key "network-id"; + description + "Describes a network. + A network typically contains an inventory of nodes, + topological information (augmented through the + network-topology data model), and layering information."; + leaf network-id { + type network-id; + description + "Identifies a network."; + } + container network-types { + description + "Serves as an augmentation target. + The network type is indicated through corresponding + presence containers augmented into this container."; + } + list supporting-network { + key "network-ref"; + description + "An underlay network, used to represent layered network + topologies."; + leaf network-ref { + type leafref { + path "/nw:networks/nw:network/nw:network-id"; + require-instance false; + } + description + "References the underlay network."; + } + } + + list node { + key "node-id"; + description + "The inventory of nodes of this network."; + leaf node-id { + type node-id; + description + "Uniquely identifies a node within the containing + network."; + } + list supporting-node { + key "network-ref node-ref"; + description + "Represents another node that is in an underlay network + and that supports this node. 
Used to represent layering + structure."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-network/nw:network-ref"; + require-instance false; + } + description + "References the underlay network of which the + underlay node is a part."; + } + leaf node-ref { + type leafref { + path "/nw:networks/nw:network/nw:node/nw:node-id"; + require-instance false; + } + description + "References the underlay node itself."; + } + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang new file mode 100644 index 000000000..56941fdca --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang @@ -0,0 +1,359 @@ +module ietf-l3-unicast-topology { + yang-version 1.1; + namespace + "urn:ietf:params:xml:ns:yang:ietf-l3-unicast-topology"; + prefix "l3t"; + import ietf-network { + prefix "nw"; + } + import ietf-network-topology { + prefix "nt"; + } + import ietf-inet-types { + prefix "inet"; + } + import ietf-routing-types { + prefix "rt-types"; + } + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + contact + "WG Web: + WG List: + Editor: Alexander Clemm + + Editor: Jan Medved + + Editor: Robert Varga + + Editor: Xufeng Liu + + Editor: Nitin Bahadur + + Editor: Hariharan Ananthakrishnan + "; + description + "This module defines a model for Layer 3 Unicast + topologies. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of + RFC 8346; see the RFC itself for full legal notices."; + revision "2018-02-26" { + description + "Initial revision."; + reference + "RFC 8346: A YANG Data Model for Layer 3 Topologies"; + } + + identity flag-identity { + description "Base type for flags"; + } + + typedef l3-event-type { + type enumeration { + enum "add" { + description + "A Layer 3 node, link, prefix, or termination point has + been added"; + } + enum "remove" { + description + "A Layer 3 node, link, prefix, or termination point has + been removed"; + } + enum "update" { + description + "A Layer 3 node, link, prefix, or termination point has + been updated"; + } + } + description "Layer 3 event type for notifications"; + } + + typedef prefix-flag-type { + type identityref { + base "flag-identity"; + } + description "Prefix flag attributes"; + } + + typedef node-flag-type { + type identityref { + base "flag-identity"; + } + description "Node flag attributes"; + } + + typedef link-flag-type { + type identityref { + base "flag-identity"; + } + description "Link flag attributes"; + } + + typedef l3-flag-type { + type identityref { + base "flag-identity"; + } + description "L3 flag attributes"; + } + + grouping l3-prefix-attributes { + description + "L3 prefix attributes"; + leaf prefix { + type inet:ip-prefix; + description + "IP prefix value"; + } + leaf metric { + type uint32; + description + "Prefix metric"; + } + leaf-list flag { + type prefix-flag-type; + description + "Prefix flags"; + } + } + grouping l3-unicast-topology-type { + description "Identifies the topology type to be L3 Unicast."; + container l3-unicast-topology { + presence "indicates L3 Unicast topology"; + description + "The presence of the container node indicates L3 Unicast + topology"; + } + } + grouping l3-topology-attributes { + description "Topology scope attributes"; + container l3-topology-attributes { + description "Contains topology attributes"; + leaf name { + type string; + description + "Name of the topology"; + } + leaf-list flag { + type l3-flag-type; + description + "Topology flags"; + } + } + } + grouping l3-node-attributes { + description "L3 node scope attributes"; + container l3-node-attributes { + description + "Contains node attributes"; + leaf name { + type inet:domain-name; + description + "Node name"; + } + leaf-list flag { + type node-flag-type; + description + "Node flags"; + } + leaf-list router-id { + type rt-types:router-id; + description + "Router-id for the node"; + } + list prefix { + key "prefix"; + description + "A list of prefixes along with their attributes"; + uses l3-prefix-attributes; + } + } + } + grouping l3-link-attributes { + description + "L3 link scope attributes"; + container l3-link-attributes { + description + "Contains link attributes"; + leaf name { + type string; + description + "Link Name"; + } + leaf-list flag { + type link-flag-type; + description + "Link flags"; + } + leaf metric1 { + type uint64; + description + "Link Metric 1"; + } + leaf metric2 { + type uint64; + description + "Link Metric 2"; + } + } + } + grouping l3-termination-point-attributes { + description "L3 termination point scope attributes"; + container l3-termination-point-attributes { + description + "Contains termination point attributes"; + choice termination-point-type { + description + "Indicates the termination point type"; + case ip { + leaf-list ip-address { + type inet:ip-address; + description + "IPv4 or IPv6 address."; + } + } + case unnumbered { + leaf unnumbered-id { + type 
uint32; + description + "Unnumbered interface identifier. + The identifier will correspond to the ifIndex value + of the interface, i.e., the ifIndex value of the + ifEntry that represents the interface in + implementations where the Interfaces Group MIB + (RFC 2863) is supported."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + } + case interface-name { + leaf interface-name { + type string; + description + "Name of the interface. The name can (but does not + have to) correspond to an interface reference of a + containing node's interface, i.e., the path name of a + corresponding interface data node on the containing + node reminiscent of data type interface-ref defined + in RFC 8343. It should be noted that data type + interface-ref of RFC 8343 cannot be used directly, + + as this data type is used to reference an interface + in a datastore of a single node in the network, not + to uniquely reference interfaces across a network."; + reference + "RFC 8343: A YANG Data Model for Interface Management"; + } + } + } + } + } + augment "/nw:networks/nw:network/nw:network-types" { + description + "Introduces new network type for L3 Unicast topology"; + uses l3-unicast-topology-type; + } + augment "/nw:networks/nw:network" { + when "nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description + "L3 Unicast for the network as a whole"; + uses l3-topology-attributes; + } + augment "/nw:networks/nw:network/nw:node" { + when "../nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description + "L3 Unicast node-level attributes "; + uses l3-node-attributes; + } + augment "/nw:networks/nw:network/nt:link" { + when "../nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description + "Augments topology link attributes"; + uses l3-link-attributes; + } + augment "/nw:networks/nw:network/nw:node/" + +"nt:termination-point" { + when "../../nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description "Augments topology termination point configuration"; + uses l3-termination-point-attributes; + } + notification l3-node-event { + description + "Notification event for L3 node"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nw:node-ref; + uses l3-unicast-topology-type; + uses l3-node-attributes; + } + notification l3-link-event { + description + "Notification event for L3 link"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nt:link-ref; + uses l3-unicast-topology-type; + uses l3-link-attributes; + } + notification l3-prefix-event { + description + "Notification event for L3 prefix"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nw:node-ref; + uses l3-unicast-topology-type; + container prefix { + description + "Contains L3 prefix attributes"; + uses l3-prefix-attributes; + } + } + notification termination-point-event { + description + "Notification event for L3 termination point"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nt:tp-ref; + uses l3-unicast-topology-type; + uses l3-termination-point-attributes; + } +} \ No newline at end of file diff --git 
a/src/tests/tools/mock_nce_t_ctrl/yang/rfc8795/ietf-te-topology.yang b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8795/ietf-te-topology.yang new file mode 100644 index 000000000..41edbcd1f --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/rfc8795/ietf-te-topology.yang @@ -0,0 +1,1952 @@ +module ietf-te-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-topology"; + prefix tet; + + import ietf-yang-types { + prefix yang; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-te-types { + prefix te-types; + reference + "RFC 8776: Common YANG Data Types for Traffic Engineering"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + import ietf-network-topology { + prefix nt; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + + + Editor: Vishnu Pavan Beeram + + + Editor: Tarek Saad + + + Editor: Himanshu Shah + + + Editor: Oscar Gonzalez de Dios + "; + description + "This YANG module defines a TE topology model for representing, + retrieving, and manipulating technology-agnostic TE topologies. + + Copyright (c) 2020 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Simplified BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC 8795; see the + RFC itself for full legal notices."; + + revision 2020-08-06 { + description + "Initial revision."; + reference + "RFC 8795: YANG Data Model for Traffic Engineering (TE) + Topologies"; + } + + /* + * Features + */ + + feature nsrlg { + description + "This feature indicates that the system supports NSRLGs + (Non-Shared Risk Link Groups)."; + } + + feature te-topology-hierarchy { + description + "This feature indicates that the system allows an underlay + and/or overlay TE topology hierarchy."; + } + + feature template { + description + "This feature indicates that the system supports + template configuration."; + } + + /* + * Typedefs + */ + + typedef geographic-coordinate-degree { + type decimal64 { + fraction-digits 8; + } + description + "Decimal degree (DD) used to express latitude and longitude + geographic coordinates."; + } + // geographic-coordinate-degree + + typedef te-info-source { + type enumeration { + enum unknown { + description + "The source is unknown."; + } + enum locally-configured { + description + "Configured entity."; + } + enum ospfv2 { + description + "OSPFv2."; + } + enum ospfv3 { + description + "OSPFv3."; + } + enum isis { + description + "IS-IS."; + } + enum bgp-ls { + description + "BGP-LS."; + reference + "RFC 7752: North-Bound Distribution of Link-State and + Traffic Engineering (TE) Information Using BGP"; + } + enum system-processed { + description + "System-processed entity."; + } + enum other { + description + "Other source."; + } + } + description + "Describes the type of source that has provided the + related information, and the source's credibility."; + } + // te-info-source + + /* + * Groupings + */ + + grouping connectivity-matrix-entry-path-attributes { + description + "Attributes of a connectivity matrix entry."; + leaf is-allowed { + type boolean; + description + "'true' - switching is allowed; + 'false' - switching is disallowed."; + } + container underlay { + if-feature "te-topology-hierarchy"; + description + "Attributes of the TE link underlay."; + reference + "RFC 4206: Label Switched Paths (LSP) Hierarchy with + Generalized Multi-Protocol Label Switching (GMPLS) + Traffic Engineering (TE)"; + uses te-link-underlay-attributes; + } + uses te-types:generic-path-constraints; + uses te-types:generic-path-optimization; + uses te-types:generic-path-properties; + } + // connectivity-matrix-entry-path-attributes + + grouping geolocation-container { + description + "Contains a GPS location."; + container geolocation { + config false; + description + "Contains a GPS location."; + leaf altitude { + type int64; + units "millimeters"; + description + "Distance above sea level."; + } + leaf latitude { + type geographic-coordinate-degree { + range "-90..90"; + } + description + "Relative position north or south on the Earth's surface."; + } + leaf longitude { + type geographic-coordinate-degree { + range "-180..180"; + } + description + "Angular distance east or west on the Earth's surface."; + } + } + // geolocation + } + // geolocation-container + + grouping information-source-state-attributes { + description + "The attributes identifying the source that has provided the + related information, and the source's credibility."; + leaf credibility-preference { + type uint16; + description + "The preference value for calculating the Traffic + Engineering database credibility value used for + tie-break selection between different information-source + values. 
A higher value is preferable."; + } + leaf logical-network-element { + type string; + description + "When applicable, this is the name of a logical network + element from which the information is learned."; + } + leaf network-instance { + type string; + description + "When applicable, this is the name of a network instance + from which the information is learned."; + } + } + // information-source-state-attributes + + grouping information-source-per-link-attributes { + description + "Per-node container of the attributes identifying the source + that has provided the related information, and the source's + credibility."; + leaf information-source { + type te-info-source; + config false; + description + "Indicates the type of information source."; + } + leaf information-source-instance { + type string; + config false; + description + "The name indicating the instance of the information + source."; + } + container information-source-state { + config false; + description + "Contains state attributes related to the information + source."; + uses information-source-state-attributes; + container topology { + description + "When the information is processed by the system, + the attributes in this container indicate which topology + is used to generate the result information."; + uses nt:link-ref; + } + } + } + // information-source-per-link-attributes + + grouping information-source-per-node-attributes { + description + "Per-node container of the attributes identifying the source + that has provided the related information, and the source's + credibility."; + leaf information-source { + type te-info-source; + config false; + description + "Indicates the type of information source."; + } + leaf information-source-instance { + type string; + config false; + description + "The name indicating the instance of the information + source."; + } + container information-source-state { + config false; + description + "Contains state attributes related to the information + source."; + uses information-source-state-attributes; + container topology { + description + "When the information is processed by the system, + the attributes in this container indicate which topology + is used to generate the result information."; + uses nw:node-ref; + } + } + } + // information-source-per-node-attributes + + grouping interface-switching-capability-list { + description + "List of Interface Switching Capability Descriptors (ISCDs)."; + list interface-switching-capability { + key "switching-capability encoding"; + description + "List of ISCDs for this link."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description + RFC 4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS)"; + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching capability for this interface."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by this interface."; + } + uses te-link-iscd-attributes; + } + // interface-switching-capability + } + // interface-switching-capability-list + + grouping statistics-per-link { + description + "Statistics attributes per TE link."; + leaf discontinuity-time { + type yang:date-and-time; + description + "The time of the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + /* Administrative attributes */ + leaf disables { + type yang:counter32; + description + "Number of times that a link was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a link was enabled."; + } + leaf maintenance-clears { + type yang:counter32; + description + "Number of times that a link was taken out of maintenance."; + } + leaf maintenance-sets { + type yang:counter32; + description + "Number of times that a link was put in maintenance."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a link was modified."; + } + /* Operational attributes */ + leaf downs { + type yang:counter32; + description + "Number of times that a link was set to an operational state + of 'down'."; + } + leaf ups { + type yang:counter32; + description + "Number of times that a link was set to an operational state + of 'up'."; + } + /* Recovery attributes */ + leaf fault-clears { + type yang:counter32; + description + "Number of times that a link experienced a fault-clear + event."; + } + leaf fault-detects { + type yang:counter32; + description + "Number of times that a link experienced fault detection."; + } + leaf protection-switches { + type yang:counter32; + description + "Number of times that a link experienced protection + switchover."; + } + leaf protection-reverts { + type yang:counter32; + description + "Number of times that a link experienced protection + reversion."; + } + leaf restoration-failures { + type yang:counter32; + description + "Number of times that a link experienced restoration + failure."; + } + leaf restoration-starts { + type yang:counter32; + description + "Number of times that a link experienced restoration + start."; + } + leaf restoration-successes { + type yang:counter32; + description + "Number of times that a link experienced restoration + success."; + } + leaf restoration-reversion-failures { + type yang:counter32; + description + "Number of times that a link experienced restoration + reversion failure."; + } + leaf restoration-reversion-starts { + type yang:counter32; + description + "Number of times that a link experienced restoration + reversion start."; + } + leaf restoration-reversion-successes { + type yang:counter32; + description + "Number of times that a link experienced restoration + reversion success."; + } + } + // statistics-per-link + + grouping statistics-per-node { + description + "Statistics attributes per TE node."; + leaf discontinuity-time { + type yang:date-and-time; + description + "The time of the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + container node { + description + "Contains statistics attributes at the TE node level."; + leaf disables { + type yang:counter32; + description + "Number of times that a node was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a node was enabled."; + } + leaf maintenance-sets { + type yang:counter32; + description + "Number of times that a node was put in maintenance."; + } + leaf maintenance-clears { + type yang:counter32; + description + "Number of times that a node was taken out of + maintenance."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a node was modified."; + } + } + // node + container connectivity-matrix-entry { + description + "Contains statistics attributes at the level of a + connectivity matrix entry."; + leaf creates { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + created."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'create' operation"; + } + leaf deletes { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + deleted."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'delete' operation"; + } + leaf disables { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + enabled."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + modified."; + } + } + // connectivity-matrix-entry + } + // statistics-per-node + + grouping statistics-per-ttp { + description + "Statistics attributes per TE TTP (Tunnel Termination Point)."; + leaf discontinuity-time { + type yang:date-and-time; + description + "The time of the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + container tunnel-termination-point { + description + "Contains statistics attributes at the TE TTP level."; + /* Administrative attributes */ + leaf disables { + type yang:counter32; + description + "Number of times that a TTP was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a TTP was enabled."; + } + leaf maintenance-clears { + type yang:counter32; + description + "Number of times that a TTP was taken out of maintenance."; + } + leaf maintenance-sets { + type yang:counter32; + description + "Number of times that a TTP was put in maintenance."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a TTP was modified."; + } + /* Operational attributes */ + leaf downs { + type yang:counter32; + description + "Number of times that a TTP was set to an operational state + of 'down'."; + } + leaf ups { + type yang:counter32; + description + "Number of times that a TTP was set to an operational state + of 'up'."; + } + leaf in-service-clears { + type yang:counter32; + description + "Number of times that a TTP was taken out of service + (TE tunnel was released)."; + } + leaf in-service-sets { + type yang:counter32; + description + "Number of times that a TTP was put in service by a TE + tunnel (TE tunnel was set up)."; + } + } + // tunnel-termination-point + container local-link-connectivity { + description + "Contains statistics attributes at the TE LLCL (Local Link + Connectivity List) level."; + leaf creates { + type yang:counter32; + description + "Number of times that an LLCL entry was created."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'create' operation"; + } + leaf deletes { + type yang:counter32; + description + "Number of times that an LLCL entry was deleted."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'delete' operation"; + } + leaf disables { + type yang:counter32; + description + "Number of times that an LLCL entry was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that an LLCL entry was enabled."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that an LLCL entry was modified."; + } + } + // local-link-connectivity + } + // statistics-per-ttp + + grouping te-link-augment { + description + "Augmentation for a TE link."; + uses te-link-config; + uses te-link-state-derived; + container statistics { + config false; + description + "Statistics data."; + uses statistics-per-link; + } + } + // te-link-augment + + grouping te-link-config { + description + "TE link configuration grouping."; + choice bundle-stack-level { + description + "The TE link can be partitioned into bundled links or + component links."; + case bundle { + container bundled-links { + description + "A set of bundled links."; + reference + "RFC 4201: Link Bundling in MPLS Traffic + Engineering (TE)"; + list bundled-link { + key "sequence"; + description + "Specifies a bundled interface that is + further partitioned."; + leaf sequence { + type uint32; + description + "Identifies the sequence in the bundle."; + } + } + } + } + case component { + container component-links { + description + "A set of component links."; + list component-link { + key "sequence"; + description + "Specifies a 
component interface that is + sufficient to unambiguously identify the + appropriate resources."; + leaf sequence { + type uint32; + description + "Identifies the sequence in the bundle."; + } + leaf src-interface-ref { + type string; + description + "Reference to a component link interface on the + source node."; + } + leaf des-interface-ref { + type string; + description + "Reference to a component link interface on the + destination node."; + } + } + } + } + } + // bundle-stack-level + leaf-list te-link-template { + if-feature "template"; + type leafref { + path "../../../../te/templates/link-template/name"; + } + description + "The reference to a TE link template."; + } + uses te-link-config-attributes; + } + // te-link-config + + grouping te-link-config-attributes { + description + "Link configuration attributes in a TE topology."; + container te-link-attributes { + description + "Link attributes in a TE topology."; + leaf access-type { + type te-types:te-link-access-type; + description + "Link access type, which can be point-to-point or + multi-access."; + } + container external-domain { + description + "For an inter-domain link, specifies the attributes of + the remote end of the link, to facilitate the signaling at + the local end."; + uses nw:network-ref; + leaf remote-te-node-id { + type te-types:te-node-id; + description + "Remote TE node identifier, used together with + 'remote-te-link-tp-id' to identify the remote Link + Termination Point (LTP) in a different domain."; + } + leaf remote-te-link-tp-id { + type te-types:te-tp-id; + description + "Remote TE LTP identifier, used together with + 'remote-te-node-id' to identify the remote LTP in a + different domain."; + } + } + leaf is-abstract { + type empty; + description + "Present if the link is abstract."; + } + leaf name { + type string; + description + "Link name."; + } + container underlay { + if-feature "te-topology-hierarchy"; + description + "Attributes of the TE link underlay."; + reference + "RFC 4206: Label Switched Paths (LSP) Hierarchy with + Generalized Multi-Protocol Label Switching (GMPLS) + Traffic Engineering (TE)"; + uses te-link-underlay-attributes; + } + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the link."; + } + uses te-link-info-attributes; + } + // te-link-attributes + } + // te-link-config-attributes + + grouping te-link-info-attributes { + description + "Advertised TE information attributes."; + leaf link-index { + type uint64; + description + "The link identifier. If OSPF is used, this object + represents an ospfLsdbID. If IS-IS is used, this object + represents an isisLSPID. If a locally configured link is + used, this object represents a unique value, which is + locally defined in a router."; + } + leaf administrative-group { + type te-types:admin-groups; + description + "Administrative group or color of the link. 
+ This attribute covers both administrative groups (defined + in RFCs 3630 and 5305) and Extended Administrative Groups + (defined in RFC 7308)."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering + RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + uses interface-switching-capability-list; + uses te-types:label-set-info; + leaf link-protection-type { + type identityref { + base te-types:link-protection-type; + } + description + "Link Protection Type desired for this link."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching (GMPLS)"; + } + container max-link-bandwidth { + uses te-types:te-bandwidth; + description + "Maximum bandwidth that can be seen on this link in this + direction. Units are in bytes per second."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + container max-resv-link-bandwidth { + uses te-types:te-bandwidth; + description + "Maximum amount of bandwidth that can be reserved in this + direction in this link. Units are in bytes per second."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + list unreserved-bandwidth { + key "priority"; + max-elements 8; + description + "Unreserved bandwidth for priority levels 0-7. Units are in + bytes per second."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + leaf priority { + type uint8 { + range "0..7"; + } + description + "Priority."; + } + uses te-types:te-bandwidth; + } + leaf te-default-metric { + type uint32; + description + "Traffic Engineering metric."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + leaf te-delay-metric { + type uint32; + description + "Traffic Engineering delay metric."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions"; + } + leaf te-igp-metric { + type uint32; + description + "IGP metric used for Traffic Engineering."; + reference + "RFC 3785: Use of Interior Gateway Protocol (IGP) Metric as a + second MPLS Traffic Engineering (TE) Metric"; + } + container te-srlgs { + description + "Contains a list of SRLGs."; + leaf-list value { + type te-types:srlg; + description + "SRLG value."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching (GMPLS)"; + } + } + container te-nsrlgs { + if-feature "nsrlg"; + description + "Contains a list of NSRLGs (Non-Shared Risk Link Groups). 
+ When an abstract TE link is configured, this list specifies + the request that underlay TE paths need to be mutually + disjoint with other TE links in the same groups."; + leaf-list id { + type uint32; + description + "NSRLG ID, uniquely configured within a topology."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + } + } + // te-link-info-attributes + + grouping te-link-iscd-attributes { + description + "TE link ISCD attributes."; + reference + "RFC 4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS), Section 1.4"; + list max-lsp-bandwidth { + key "priority"; + max-elements 8; + description + "Maximum Label Switched Path (LSP) bandwidth at + priorities 0-7."; + leaf priority { + type uint8 { + range "0..7"; + } + description + "Priority."; + } + uses te-types:te-bandwidth; + } + } + // te-link-iscd-attributes + + grouping te-link-state-derived { + description + "Link state attributes in a TE topology."; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the link."; + } + leaf is-transitional { + type empty; + config false; + description + "Present if the link is transitional; used as an + alternative approach in lieu of 'inter-layer-lock-id' + for path computation in a TE topology covering multiple + layers or multiple regions."; + reference + "RFC 5212: Requirements for GMPLS-Based Multi-Region and + Multi-Layer Networks (MRN/MLN) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + } + uses information-source-per-link-attributes; + list information-source-entry { + key "information-source information-source-instance"; + config false; + description + "A list of information sources learned, including the source + that is used."; + uses information-source-per-link-attributes; + uses te-link-info-attributes; + } + container recovery { + config false; + description + "Status of the recovery process."; + leaf restoration-status { + type te-types:te-recovery-status; + description + "Restoration status."; + } + leaf protection-status { + type te-types:te-recovery-status; + description + "Protection status."; + } + } + container underlay { + if-feature "te-topology-hierarchy"; + config false; + description + "State attributes for the TE link underlay."; + leaf dynamic { + type boolean; + description + "'true' if the underlay is dynamically created."; + } + leaf committed { + type boolean; + description + "'true' if the underlay is committed."; + } + } + } + // te-link-state-derived + + grouping te-link-underlay-attributes { + description + "Attributes for the TE link underlay."; + reference + "RFC 4206: Label Switched Paths (LSP) Hierarchy with + Generalized Multi-Protocol Label Switching (GMPLS) + Traffic Engineering (TE)"; + leaf enabled { + type boolean; + description + "'true' if the underlay is enabled. 
+ 'false' if the underlay is disabled."; + } + container primary-path { + description + "The service path on the underlay topology that + supports this link."; + uses nw:network-ref; + list path-element { + key "path-element-id"; + description + "A list of path elements describing the service path."; + leaf path-element-id { + type uint32; + description + "To identify the element in a path."; + } + uses te-path-element; + } + } + // primary-path + list backup-path { + key "index"; + description + "A list of backup service paths on the underlay topology that + protect the underlay primary path. If the primary path is + not protected, the list contains zero elements. If the + primary path is protected, the list contains one or more + elements."; + leaf index { + type uint32; + description + "A sequence number to identify a backup path."; + } + uses nw:network-ref; + list path-element { + key "path-element-id"; + description + "A list of path elements describing the backup service + path."; + leaf path-element-id { + type uint32; + description + "To identify the element in a path."; + } + uses te-path-element; + } + } + // backup-path + leaf protection-type { + type identityref { + base te-types:lsp-protection-type; + } + description + "Underlay protection type desired for this link."; + } + container tunnel-termination-points { + description + "Underlay TTPs desired for this link."; + leaf source { + type binary; + description + "Source TTP identifier."; + } + leaf destination { + type binary; + description + "Destination TTP identifier."; + } + } + container tunnels { + description + "Underlay TE tunnels supporting this TE link."; + leaf sharing { + type boolean; + default "true"; + description + "'true' if the underlay tunnel can be shared with other + TE links; + 'false' if the underlay tunnel is dedicated to this + TE link. 
+ This leaf is the default option for all TE tunnels + and may be overridden by the per-TE-tunnel value."; + } + list tunnel { + key "tunnel-name"; + description + "Zero, one, or more underlay TE tunnels that support this + TE link."; + leaf tunnel-name { + type string; + description + "A tunnel name uniquely identifies an underlay TE tunnel, + used together with the 'source-node' value for this + link."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + leaf sharing { + type boolean; + description + "'true' if the underlay tunnel can be shared with other + TE links; + 'false' if the underlay tunnel is dedicated to this + TE link."; + } + } + // tunnel + } + // tunnels + } + // te-link-underlay-attributes + + grouping te-node-augment { + description + "Augmentation for a TE node."; + uses te-node-config; + uses te-node-state-derived; + container statistics { + config false; + description + "Statistics data."; + uses statistics-per-node; + } + list tunnel-termination-point { + key "tunnel-tp-id"; + description + "A termination point can terminate a tunnel."; + leaf tunnel-tp-id { + type binary; + description + "TTP identifier."; + } + uses te-node-tunnel-termination-point-config; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the TTP."; + } + uses geolocation-container; + container statistics { + config false; + description + "Statistics data."; + uses statistics-per-ttp; + } + // Relationship to other TTPs + list supporting-tunnel-termination-point { + key "node-ref tunnel-tp-ref"; + description + "Identifies the TTPs on which this TTP depends."; + leaf node-ref { + type inet:uri; + description + "This leaf identifies the node in which the supporting + TTP is present. 
+ This node is either the supporting node or a node in + an underlay topology."; + } + leaf tunnel-tp-ref { + type binary; + description + "Reference to a TTP that is in either the supporting node + or a node in an underlay topology."; + } + } + // supporting-tunnel-termination-point + } + // tunnel-termination-point + } + // te-node-augment + + grouping te-node-config { + description + "TE node configuration grouping."; + leaf-list te-node-template { + if-feature "template"; + type leafref { + path "../../../../te/templates/node-template/name"; + } + description + "The reference to a TE node template."; + } + uses te-node-config-attributes; + } + // te-node-config + + grouping te-node-config-attributes { + description + "Configuration node attributes in a TE topology."; + container te-node-attributes { + description + "Contains node attributes in a TE topology."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the link."; + } + uses te-node-connectivity-matrices; + uses te-node-info-attributes; + } + } + // te-node-config-attributes + + grouping te-node-config-attributes-template { + description + "Configuration node attributes for a template in a TE + topology."; + container te-node-attributes { + description + "Contains node attributes in a TE topology."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the link."; + } + uses te-node-info-attributes; + } + } + // te-node-config-attributes-template + + grouping te-node-connectivity-matrices { + description + "Connectivity matrix on a TE node."; + container connectivity-matrices { + description + "Contains a connectivity matrix on a TE node."; + leaf number-of-entries { + type uint16; + description + "The number of connectivity matrix entries. 
+ If this number is specified in the configuration request, + the number is the requested number of entries, which may + not all be listed in the list; + if this number is reported in the state data, + the number is the current number of operational entries."; + } + uses te-types:label-set-info; + uses connectivity-matrix-entry-path-attributes; + list connectivity-matrix { + key "id"; + description + "Represents a node's switching limitations, i.e., + limitations in the interconnecting network TE links + across the node."; + reference + "RFC 7579: General Network Element Constraint Encoding + for GMPLS-Controlled Networks"; + leaf id { + type uint32; + description + "Identifies the connectivity matrix entry."; + } + } + // connectivity-matrix + } + // connectivity-matrices + } + // te-node-connectivity-matrices + + grouping te-node-connectivity-matrix-attributes { + description + "Termination point references of a connectivity matrix entry."; + container from { + description + "Reference to a source LTP."; + leaf tp-ref { + type leafref { + path "../../../../../../nt:termination-point/nt:tp-id"; + } + description + "Relative reference to a termination point."; + } + uses te-types:label-set-info; + } + container to { + description + "Reference to a destination LTP."; + leaf tp-ref { + type leafref { + path "../../../../../../nt:termination-point/nt:tp-id"; + } + description + "Relative reference to a termination point."; + } + uses te-types:label-set-info; + } + uses connectivity-matrix-entry-path-attributes; + } + // te-node-connectivity-matrix-attributes + + grouping te-node-info-attributes { + description + "Advertised TE information attributes."; + leaf domain-id { + type uint32; + description + "Identifies the domain to which this node belongs. 
+ This attribute is used to support inter-domain links."; + reference + "RFC 5152: A Per-Domain Path Computation Method for + Establishing Inter-Domain Traffic Engineering (TE) + Label Switched Paths (LSPs) + RFC 5316: ISIS Extensions in Support of Inter-Autonomous + System (AS) MPLS and GMPLS Traffic Engineering + RFC 5392: OSPF Extensions in Support of Inter-Autonomous + System (AS) MPLS and GMPLS Traffic Engineering"; + } + leaf is-abstract { + type empty; + description + "Present if the node is abstract; not present if the node + is actual."; + } + leaf name { + type string; + description + "Node name."; + } + leaf-list signaling-address { + type inet:ip-address; + description + "The node's signaling address."; + } + container underlay-topology { + if-feature "te-topology-hierarchy"; + description + "When an abstract node encapsulates a topology, the + attributes in this container point to said topology."; + uses nw:network-ref; + } + } + // te-node-info-attributes + + grouping te-node-state-derived { + description + "Node state attributes in a TE topology."; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the node."; + } + uses geolocation-container; + leaf is-multi-access-dr { + type empty; + config false; + description + "The presence of this attribute indicates that this TE node + is a pseudonode elected as a designated router."; + reference + "RFC 1195: Use of OSI IS-IS for Routing in TCP/IP and Dual + Environments + RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + uses information-source-per-node-attributes; + list information-source-entry { + key "information-source information-source-instance"; + config false; + description + "A list of information sources learned, including the source + that is used."; + uses information-source-per-node-attributes; + uses te-node-connectivity-matrices; + uses te-node-info-attributes; + } + } + // te-node-state-derived + + grouping te-node-tunnel-termination-point-config { + description + "Termination capability of a TTP on a TE node."; + uses te-node-tunnel-termination-point-config-attributes; + container local-link-connectivities { + description + "Contains an LLCL for a TTP on a TE node."; + leaf number-of-entries { + type uint16; + description + "The number of LLCL entries. 
+ If this number is specified in the configuration request, + the number is the requested number of entries, which may + not all be listed in the list; + if this number is reported in the state data, + the number is the current number of operational entries."; + } + uses te-types:label-set-info; + uses connectivity-matrix-entry-path-attributes; + } + } + // te-node-tunnel-termination-point-config + + grouping te-node-tunnel-termination-point-config-attributes { + description + "Configuration attributes of a TTP on a TE node."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the TTP."; + } + leaf name { + type string; + description + "A descriptive name for the TTP."; + } + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching capability for this interface."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by this interface."; + } + leaf-list inter-layer-lock-id { + type uint32; + description + "Inter-layer lock ID, used for path computation in a TE + topology covering multiple layers or multiple regions."; + reference + "RFC 5212: Requirements for GMPLS-Based Multi-Region and + Multi-Layer Networks (MRN/MLN) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + } + leaf protection-type { + type identityref { + base te-types:lsp-protection-type; + } + description + "The protection type that this TTP is capable of."; + } + container client-layer-adaptation { + description + "Contains capability information to support a client-layer + adaptation in a multi-layer topology."; + list switching-capability { + key "switching-capability encoding"; + description + "List of supported switching capabilities."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching (GMPLS) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching capability for the client-layer adaptation."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by the client-layer adaptation."; + } + uses te-types:te-bandwidth; + } + } + } + // te-node-tunnel-termination-point-config-attributes + + grouping te-node-tunnel-termination-point-llc-list { + description + "LLCL of a TTP on a TE node."; + list local-link-connectivity { + key "link-tp-ref"; + description + "The termination capabilities between the TTP and the LTP. + This capability information can be used to compute + the tunnel path. 
+ The Interface Adjustment Capability Descriptors (IACDs) + (defined in RFC 6001) on each LTP can be derived from + this list."; + reference + "RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + leaf link-tp-ref { + type leafref { + path "../../../../../nt:termination-point/nt:tp-id"; + } + description + "LTP."; + } + uses te-types:label-set-info; + uses connectivity-matrix-entry-path-attributes; + } + } + // te-node-tunnel-termination-point-llc-list + + grouping te-path-element { + description + "A group of attributes defining an element in a TE path, + such as a TE node, TE link, TE atomic resource, or label."; + uses te-types:explicit-route-hop; + } + // te-path-element + + grouping te-termination-point-augment { + description + "Augmentation for a TE termination point."; + leaf te-tp-id { + type te-types:te-tp-id; + description + "An identifier that uniquely identifies a TE termination + point."; + } + container te { + must '../te-tp-id'; + presence "TE support"; + description + "Indicates TE support."; + uses te-termination-point-config; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the LTP."; + } + uses geolocation-container; + } + } + // te-termination-point-augment + + grouping te-termination-point-config { + description + "TE termination point configuration grouping."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the LTP."; + } + leaf name { + type string; + description + "A descriptive name for the LTP."; + } + uses interface-switching-capability-list; + leaf inter-domain-plug-id { + type binary; + description + "A network-wide unique number that identifies on the + network a connection that supports a given inter-domain + TE link. 
This is a more flexible alternative to specifying + 'remote-te-node-id' and 'remote-te-link-tp-id' on a TE link + when the provider either does not know 'remote-te-node-id' + and 'remote-te-link-tp-id' or needs to give the client the + flexibility to mix and match multiple topologies."; + } + leaf-list inter-layer-lock-id { + type uint32; + description + "Inter-layer lock ID, used for path computation in a TE + topology covering multiple layers or multiple regions."; + reference + "RFC 5212: Requirements for GMPLS-Based Multi-Region and + Multi-Layer Networks (MRN/MLN) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + } + } + // te-termination-point-config + + grouping te-topologies-augment { + description + "Augmentation for TE topologies."; + container te { + presence "TE support"; + description + "Indicates TE support."; + container templates { + description + "Configuration parameters for templates used for a TE + topology."; + list node-template { + if-feature "template"; + key "name"; + leaf name { + type te-types:te-template-name; + description + "The name to identify a TE node template."; + } + description + "The list of TE node templates used to define sharable + and reusable TE node attributes."; + uses template-attributes; + uses te-node-config-attributes-template; + } + // node-template + list link-template { + if-feature "template"; + key "name"; + leaf name { + type te-types:te-template-name; + description + "The name to identify a TE link template."; + } + description + "The list of TE link templates used to define sharable + and reusable TE link attributes."; + uses template-attributes; + uses te-link-config-attributes; + } + // link-template + } + // templates + } + // te + } + // te-topologies-augment + + grouping te-topology-augment { + description + "Augmentation for a TE topology."; + uses te-types:te-topology-identifier; + container te { + must '../te-topology-identifier/provider-id' + + ' and ../te-topology-identifier/client-id' + + ' and ../te-topology-identifier/topology-id'; + presence "TE support"; + description + "Indicates TE support."; + uses te-topology-config; + uses geolocation-container; + } + } + // te-topology-augment + + grouping te-topology-config { + description + "TE topology configuration grouping."; + leaf name { + type string; + description + "Name of the TE topology. This attribute is optional and can + be specified by the operator to describe the TE topology, + which can be useful when 'network-id' (RFC 8345) is not + descriptive and not modifiable because of being generated + by the system."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + leaf preference { + type uint8 { + range "1..255"; + } + description + "Specifies a preference for this topology. 
A lower number + indicates a higher preference."; + } + leaf optimization-criterion { + type identityref { + base te-types:objective-function-type; + } + description + "Optimization criterion applied to this topology."; + reference + "RFC 3272: Overview and Principles of Internet Traffic + Engineering"; + } + list nsrlg { + if-feature "nsrlg"; + key "id"; + description + "List of NSRLGs (Non-Shared Risk Link Groups)."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + leaf id { + type uint32; + description + "Identifies the NSRLG entry."; + } + leaf disjointness { + type te-types:te-path-disjointness; + description + "The type of resource disjointness."; + } + } + // nsrlg + } + // te-topology-config + + grouping template-attributes { + description + "Common attributes for all templates."; + leaf priority { + type uint16; + description + "The preference value for resolving conflicts between + different templates. When two or more templates specify + values for one configuration attribute, the value from the + template with the highest priority is used. + A lower number indicates a higher priority. The highest + priority is 0."; + } + leaf reference-change-policy { + type enumeration { + enum no-action { + description + "When an attribute changes in this template, the + configuration node referring to this template does + not take any action."; + } + enum not-allowed { + description + "When any configuration object has a reference to this + template, changing this template is not allowed."; + } + enum cascade { + description + "When an attribute changes in this template, the + configuration object referring to this template applies + the new attribute value to the corresponding + configuration."; + } + } + description + "This attribute specifies the action taken for a + configuration node that has a reference to this template."; + } + } + // template-attributes + + /* + * Data nodes + */ + + augment "/nw:networks/nw:network/nw:network-types" { + description + "Introduces a new network type for a TE topology."; + container te-topology { + presence "Indicates a TE topology"; + description + "Its presence identifies the TE topology type."; + } + } + + augment "/nw:networks" { + description + "Augmentation parameters for TE topologies."; + uses te-topologies-augment; + } + + augment "/nw:networks/nw:network" { + when 'nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for a TE topology."; + uses te-topology-augment; + } + + augment "/nw:networks/nw:network/nw:node" { + when '../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for TE at the node level."; + leaf te-node-id { + type te-types:te-node-id; + description + "The identifier of a node in the TE topology. + A node is specific to a topology to which it belongs."; + } + container te { + must '../te-node-id' { + description + "'te-node-id' is mandatory."; + } + must 'count(../nw:supporting-node)<=1' { + description + "For a node in a TE topology, there cannot be more + than one supporting node. 
If multiple nodes are + abstracted, the 'underlay-topology' field is used."; + } + presence "TE support"; + description + "Indicates TE support."; + uses te-node-augment; + } + } + + augment "/nw:networks/nw:network/nt:link" { + when '../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for TE at the link level."; + container te { + must 'count(../nt:supporting-link)<=1' { + description + "For a link in a TE topology, there cannot be more + than one supporting link. If one or more link paths are + abstracted, the underlay is used."; + } + presence "TE support"; + description + "Indicates TE support."; + uses te-link-augment; + } + } + + augment "/nw:networks/nw:network/nw:node/" + + "nt:termination-point" { + when '../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for TE at the termination point + level."; + uses te-termination-point-augment; + } + + augment "/nw:networks/nw:network/nt:link/te/bundle-stack-level/" + + "bundle/bundled-links/bundled-link" { + when '../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for a TE bundled link."; + leaf src-tp-ref { + type leafref { + path "../../../../../nw:node[nw:node-id = " + + "current()/../../../../nt:source/" + + "nt:source-node]/" + + "nt:termination-point/nt:tp-id"; + require-instance true; + } + description + "Reference to another TE termination point on the + same source node."; + } + leaf des-tp-ref { + type leafref { + path "../../../../../nw:node[nw:node-id = " + + "current()/../../../../nt:destination/" + + "nt:dest-node]/" + + "nt:termination-point/nt:tp-id"; + require-instance true; + } + description + "Reference to another TE termination point on the + same destination node."; + } + } + + augment "/nw:networks/nw:network/nw:node/te/" + + "information-source-entry/connectivity-matrices/" + + "connectivity-matrix" { + when '../../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for the TE node connectivity matrix."; + uses te-node-connectivity-matrix-attributes; + } + + augment "/nw:networks/nw:network/nw:node/te/te-node-attributes/" + + "connectivity-matrices/connectivity-matrix" { + when '../../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for the TE node connectivity matrix."; + uses te-node-connectivity-matrix-attributes; + } + + augment "/nw:networks/nw:network/nw:node/te/" + + "tunnel-termination-point/local-link-connectivities" { + when '../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for TE node TTP LLCs (Local Link + Connectivities)."; + uses te-node-tunnel-termination-point-llc-list; + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/yang-repo-url.txt b/src/tests/tools/mock_nce_t_ctrl/yang/yang-repo-url.txt new file mode 100644 index 000000000..df60dab3b --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/yang/yang-repo-url.txt @@ -0,0 +1 @@ 
+https://github.com/YangModels/yang -- GitLab From 62916357a3202e7ae7e53fa348d25bdb818a0e38 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 19:03:26 +0000 Subject: [PATCH 048/367] ECOC F5GA Telemetry Demo: - Updated deploy scripts to use new NCE-T libyang-based mock - Updated descriptors to use named UUIDs instead of IP-based - Corrected nodes and links - Added missing attributes --- .../data/old-numbered/topology-agg.json | 145 ++++++++++++++++++ .../data/old-numbered/topology-e2e.json | 118 ++++++++++++++ .../data/old-numbered/topology-ip.json | 129 ++++++++++++++++ .../data/topology-agg.json | 144 ++++++----------- .../data/topology-e2e.json | 103 +------------ .../data/topology-ip.json | 95 ++++-------- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 5 +- 7 files changed, 479 insertions(+), 260 deletions(-) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-agg.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-e2e.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-ip.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-agg.json new file mode 100644 index 000000000..5d23fdd5c --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-agg.json @@ -0,0 +1,145 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "name": "TFS-IP", "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.12"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "name": "NCE-T", "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8444" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "name": "172.16.58.10", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": 
"copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "name": "172.16.204.221", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "name": "172.1.101.22", "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "name": "172.16.204.220", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "name": "172.1.201.22", "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-501"}}, "name": "172.16.58.10-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.125.25-200"}}, "name": "172.16.125.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}} + 
]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-500"}}, "name": "172.16.58.10-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.182.25-200"}}, "name": "172.16.182.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.204.221-500"}}, "name": "172.16.204.221-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.220-500"}}, "name": "172.16.204.220-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.221-200"}}, "name": "172.16.204.221-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.1.101.22-500"}}, "name": "172.1.101.22-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.220-200"}}, "name": "172.16.204.220-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.1.201.22-500"}}, "name": "172.1.201.22-500", + "attributes": {"total_capacity_gbps": 10, 
"used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "200"}} + ]} + ] +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-e2e.json new file mode 100644 index 000000000..d8634caf0 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-e2e.json @@ -0,0 +1,118 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "name": "TFS-AGG", "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "name": "NCE-FAN", "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify": false + }}} + ]}}, + + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + 
"device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + }, + {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500a", "name": "500a", "type": "copper"}, + {"uuid": "500b", "name": "500b", "type": "copper"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-501"}}, "name": "172.16.58.10-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.125.25-200"}}, "name": "172.16.125.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.58.10-500"}}, "name": "172.16.58.10-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.182.25-200"}}, "name": "172.16.182.25-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500a"}}, "name": "172.16.204.22x-500a", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} + 
]}, + + {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500b"}}, "name": "172.16.204.22x-500b", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}} + ]} + ] +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-ip.json new file mode 100644 index 000000000..f4258d52a --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/topology-ip.json @@ -0,0 +1,129 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "name": "172.16.125.25", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "name": "172.16.125.31", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "name": "172.16.125.33", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "name": "172.16.125.32", "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + 
{"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "172.16.125.25-500"}}, "name": "172.16.125.25-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.125.31-500"}}, "name": "172.16.125.31-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "172.16.125.25-501"}}, "name": "172.16.125.25-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.125.33-500"}}, "name": "172.16.125.33-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "501"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "172.16.125.31-501"}}, "name": "172.16.125.31-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.125.32-500"}}, "name": "172.16.125.32-500", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "501"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "172.16.125.32-501"}}, "name": "172.16.125.32-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "501"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "172.16.125.33-501"}}, "name": "172.16.125.33-501", + "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": 
"172.16.125.32"}}, "endpoint_uuid": {"uuid": "501"}} + ] + } + ] +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index 5d23fdd5c..4cc2c04a9 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -6,140 +6,82 @@ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} ], "devices": [ - {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "name": "TFS-IP", "device_type": "teraflowsdn", - "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], + "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.12"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "name": "NCE-T", "device_type": "ip-sdn-controller", - "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], + "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8444" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8444"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", - "timeout": 120, "verify": false + "timeout": 120, "verify": false, "import_topology": "topology" }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "name": "172.16.58.10", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"device_id": {"device_uuid": {"uuid": "OLT"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.58.10"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "name": "172.16.204.221", 
"device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"device_id": {"device_uuid": {"uuid": "POP1"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.220"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "200", "name": "200", "type": "copper"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]}}, - {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "name": "172.1.101.22", "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500", "name": "500", "type": "copper"} - ]}}} - ]} - }, - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "name": "172.16.204.220", "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "200", "name": "200", "type": "copper"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} ]}}} - ]}}, - {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "name": "172.1.201.22", "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500", "name": "500", "type": "copper"} - ]}}} - ]} - } + ]}} ], "links": [ - {"link_id": {"link_uuid": {"uuid": 
"172.16.58.10-501"}}, "name": "172.16.58.10-501", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.125.25-200"}}, "name": "172.16.125.25-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.58.10-500"}}, "name": "172.16.58.10-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.182.25-200"}}, "name": "172.16.182.25-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "500"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.204.221-500"}}, "name": "172.16.204.221-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.204.220-500"}}, "name": "172.16.204.220-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "500"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.204.221-200"}}, "name": "172.16.204.221-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "endpoint_uuid": {"uuid": "500"}} + {"link_id": {"link_uuid": {"uuid": "OLT-PPE1"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, 
"link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} ]}, - {"link_id": {"link_uuid": {"uuid": "172.1.101.22-500"}}, "name": "172.1.101.22-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.1.101.22"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.221"}}, "endpoint_uuid": {"uuid": "200"}} + {"link_id": {"link_uuid": {"uuid": "OLT-OPE1"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.204.220-200"}}, "name": "172.16.204.220-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "endpoint_uuid": {"uuid": "500"}} + {"link_id": {"link_uuid": {"uuid": "PPE2-POP2"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} ]}, - {"link_id": {"link_uuid": {"uuid": "172.1.201.22-500"}}, "name": "172.1.201.22-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.1.201.22"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "endpoint_uuid": {"uuid": "200"}} + {"link_id": {"link_uuid": {"uuid": "OPE2-POP2"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "O-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} ]} ] } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json index d8634caf0..d5d314989 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -6,8 +6,9 @@ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} ], "devices": [ - {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "name": "TFS-AGG", "device_type": "teraflowsdn", - "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_config": {"config_rules": [ + {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], + "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { @@ 
-15,104 +16,16 @@ "timeout": 120, "verify": false }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "name": "NCE-FAN", "device_type": "ip-sdn-controller", - "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "device_type": "ip-sdn-controller", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], + "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false }}} - ]}}, - - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500a", "name": "500a", "type": "copper"}, - {"uuid": "500b", "name": "500b", "type": "copper"} - ]}}} - ]} - }, - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500a", "name": "500a", "type": "copper"}, - {"uuid": "500b", "name": "500b", "type": "copper"} - ]}}} - ]} - }, - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500a", "name": "500a", "type": "copper"}, - {"uuid": "500b", "name": "500b", "type": "copper"} - ]}}} - ]} - }, - {"device_id": {"device_uuid": {"uuid": "172.16.204.220"}}, "device_type": "emu-datacenter", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500a", "name": "500a", "type": "copper"}, - 
{"uuid": "500b", "name": "500b", "type": "copper"} - ]}}} - ]} - } + ]}} ], - "links": [ - {"link_id": {"link_uuid": {"uuid": "172.16.58.10-501"}}, "name": "172.16.58.10-501", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.125.25-200"}}, "name": "172.16.125.25-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "501"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.58.10-500"}}, "name": "172.16.58.10-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.182.25-200"}}, "name": "172.16.182.25-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.182.25"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.58.10"}}, "endpoint_uuid": {"uuid": "500"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.125.32-200"}}, "name": "172.16.125.32-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500a"}}, "name": "172.16.204.22x-500a", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500a"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - - {"link_id": {"link_uuid": {"uuid": "172.16.204.22x-500b"}}, "name": "172.16.204.22x-500b", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "172.16.185.32-200"}}, "name": "172.16.185.32-200", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.185.32"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.204.22x"}}, "endpoint_uuid": {"uuid": "500b"}} - ]} - ] + "links": [] } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json index f4258d52a..66f9f877c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json @@ -7,12 +7,13 @@ ], "devices": [ { - "device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "name": "172.16.125.25", "device_type": 
"emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-PE1"}}, "device_type": "emu-packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.122.25"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "200", "name": "200", "type": "copper"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} @@ -20,36 +21,39 @@ ]} }, { - "device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "name": "172.16.125.31", "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-P1"}}, "device_type": "emu-packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.31"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "name": "172.16.125.33", "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-P2"}}, "device_type": "emu-packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.33"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "name": "172.16.125.32", "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-PE2"}}, "device_type": "emu-packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", 
"resource_value": "172.16.125.32"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "200", "name": "200", "type": "copper"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} @@ -59,70 +63,35 @@ ], "links": [ { - "link_id": {"link_uuid": {"uuid": "172.16.125.25-500"}}, "name": "172.16.125.25-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_id": {"link_uuid": {"uuid": "L5"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "500"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.125.31-500"}}, "name": "172.16.125.31-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_id": {"link_uuid": {"uuid": "L6"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "500"}} ] }, - { - "link_id": {"link_uuid": {"uuid": "172.16.125.25-501"}}, "name": "172.16.125.25-501", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_id": {"link_uuid": {"uuid": "L9"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "500"}} ] }, { - "link_id": {"link_uuid": {"uuid": "172.16.125.33-500"}}, "name": "172.16.125.33-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_id": {"link_uuid": {"uuid": "L10"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.25"}}, "endpoint_uuid": {"uuid": "501"}} - ] - }, - - { - "link_id": {"link_uuid": {"uuid": "172.16.125.31-501"}}, "name": "172.16.125.31-501", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, - "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": 
"172.16.125.32"}}, "endpoint_uuid": {"uuid": "500"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "172.16.125.32-500"}}, "name": "172.16.125.32-500", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, - "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.31"}}, "endpoint_uuid": {"uuid": "501"}} - ] - }, - - { - "link_id": {"link_uuid": {"uuid": "172.16.125.32-501"}}, "name": "172.16.125.32-501", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, - "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "501"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "172.16.125.33-501"}}, "name": "172.16.125.33-501", - "attributes": {"total_capacity_gbps": 10, "used_capacity_gbps": 0}, - "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "172.16.125.33"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "172.16.125.32"}}, "endpoint_uuid": {"uuid": "501"}} + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "501"}} ] } ] diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 47d14ecd8..63eb3e155 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -32,7 +32,7 @@ case "$HOSTNAME" in docker buildx build -t nce-fan-ctrl:mock -f Dockerfile . echo "Building NCE-T Controller..." - cd ~/tfs-ctrl/src/tests/tools/mock_ietf_actn_sdn_ctrl + cd ~/tfs-ctrl/src/tests/tools/mock_nce_t_ctrl docker buildx build -t nce-t-ctrl:mock -f Dockerfile . echo "Cleaning up..." @@ -50,6 +50,7 @@ case "$HOSTNAME" in ;; tfs-e2e-ctrl) echo "Deploying TFS E2E Controller..." + sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (- End-to-End)|' src/webui/service/templates/main/home.html source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh ./deploy/all.sh @@ -59,6 +60,7 @@ case "$HOSTNAME" in ;; tfs-agg-ctrl) echo "Deploying TFS Agg Controller..." + sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (- Aggregation)|' src/webui/service/templates/main/home.html source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh ./deploy/all.sh @@ -68,6 +70,7 @@ case "$HOSTNAME" in ;; tfs-ip-ctrl) echo "Deploying TFS IP Controller..." + sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (- IP)|' src/webui/service/templates/main/home.html source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh ./deploy/all.sh -- GitLab From dd28d2d10167b16f720ac99cbd5de1ab1e70313b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 19:11:01 +0000 Subject: [PATCH 049/367] Tests - Tools - Mock NCE-T Controller - Corrected startup config - Corrected copy of YANG data models --- src/tests/tools/mock_nce_t_ctrl/Dockerfile | 2 +- src/tests/tools/mock_nce_t_ctrl/startup.json | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/Dockerfile b/src/tests/tools/mock_nce_t_ctrl/Dockerfile index a1f70694e..a80b5fe93 100644 --- a/src/tests/tools/mock_nce_t_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_t_ctrl/Dockerfile @@ -50,7 +50,7 @@ RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt # Add component files into working directory -COPY ./yang/*.yang ./yang/ +COPY ./yang/. ./yang/ COPY ./nce_t_ctrl/*.py ./nce_t_ctrl/ COPY ./startup.json ./startup.json diff --git a/src/tests/tools/mock_nce_t_ctrl/startup.json b/src/tests/tools/mock_nce_t_ctrl/startup.json index c88f00378..4ae24e51b 100644 --- a/src/tests/tools/mock_nce_t_ctrl/startup.json +++ b/src/tests/tools/mock_nce_t_ctrl/startup.json @@ -16,8 +16,8 @@ "node-id": "O-PE1", "ietf-te-topology:te-node-id": "172.16.182.25", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-PE1", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}}, + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254"} ] }, @@ -25,24 +25,24 @@ "node-id": "O-P1", "ietf-te-topology:te-node-id": "172.16.185.31", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-P1", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} ] }, { "node-id": "O-P2", "ietf-te-topology:te-node-id": "172.16.185.33", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-P2", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} ] }, { "node-id": "O-PE2", "ietf-te-topology:te-node-id": "172.16.185.32", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-PE2", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}}, + {"tp-id": "500", 
"ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254"} ] } -- GitLab From 5cbd4e55b116e01d99862121fdc2477608cd8a24 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 19:13:04 +0000 Subject: [PATCH 050/367] ECOC F5GA Telemetry Demo: - Removed component slice from IP controller --- src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh index c7b5e98b5..c02dac122 100644 --- a/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh @@ -20,7 +20,7 @@ export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device pathcomp service slice nbi webui" +export TFS_COMPONENTS="context device pathcomp service nbi webui" # Uncomment to activate Monitoring (old) #export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" -- GitLab From bcbdb6d2b82dd7eda25959fba4c6af7b044e1241 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 19:18:01 +0000 Subject: [PATCH 051/367] ECOC F5GA Telemetry Demo: - Fixed labelling of SDN Controllers in WebUI --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 63eb3e155..811d1471c 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -50,7 +50,7 @@ case "$HOSTNAME" in ;; tfs-e2e-ctrl) echo "Deploying TFS E2E Controller..." - sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (- End-to-End)|' src/webui/service/templates/main/home.html + sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (End-to-End)|' src/webui/service/templates/main/home.html source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh ./deploy/all.sh @@ -60,7 +60,7 @@ case "$HOSTNAME" in ;; tfs-agg-ctrl) echo "Deploying TFS Agg Controller..." - sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (- Aggregation)|' src/webui/service/templates/main/home.html + sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (Aggregation)|' src/webui/service/templates/main/home.html source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh ./deploy/all.sh @@ -70,7 +70,7 @@ case "$HOSTNAME" in ;; tfs-ip-ctrl) echo "Deploying TFS IP Controller..." - sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (- IP)|' src/webui/service/templates/main/home.html + sed -i 's|\(
ETSI TeraFlowSDN Controller\)
|\1 (IP)|' src/webui/service/templates/main/home.html source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh ./deploy/all.sh -- GitLab From d6fee7ba6a5c589b77d2b6a2410cc89be2c9e755 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 29 Aug 2025 19:29:14 +0000 Subject: [PATCH 052/367] Device component - ACTN Driver: - Fixed RestConfClient instantiation --- src/device/service/drivers/ietf_actn/IetfActnDriver.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index e78dc7976..c0c7a6f51 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, requests, threading +import copy, json, logging, requests, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.tools.client.RestConfClient import RestConfClient @@ -40,8 +40,14 @@ class IetfActnDriver(_Driver): self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() + self._rest_api_client = RestApiClient(address, port, settings=settings) - self._rest_conf_client = RestConfClient(address, port, **settings) + + restconf_settings = copy.deepcopy(settings) + restconf_settings.pop('base_url', None) + restconf_settings.pop('import_topology', None) + self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) + self._handler_etht_service = EthtServiceHandler(self._rest_api_client) self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_client, **settings) self._handler_osu_tunnel = OsuTunnelHandler(self._rest_api_client) -- GitLab From 626c0dc9bb57ccbb2ef8c3042d37fdca00def5bc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 08:06:15 +0000 Subject: [PATCH 053/367] ECOC F5GA Telemetry Demo: - Fixed agg topology descriptor --- src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index 4cc2c04a9..77f9aab53 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -13,7 +13,7 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", - "timeout": 120, "verify": false + "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "ip-sdn-controller", @@ -23,7 +23,7 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8444"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", - "timeout": 120, "verify": false, "import_topology": "topology" + "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, {"device_id": {"device_uuid": 
{"uuid": "OLT"}}, "device_type": "emu-packet-router", -- GitLab From 69834496b07bdbdee04e4621b40cfc078a491d48 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 08:27:26 +0000 Subject: [PATCH 054/367] Common tools: - Corrected logger utilization in RestApiClient --- src/common/tools/client/RestApiClient.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/tools/client/RestApiClient.py b/src/common/tools/client/RestApiClient.py index 1a85f91da..68977a60e 100644 --- a/src/common/tools/client/RestApiClient.py +++ b/src/common/tools/client/RestApiClient.py @@ -97,7 +97,7 @@ class RestApiClient: str(method.value).upper(), str(request_url), str(body), str(http_status_code), str(reply.text) ) - self._logger.error(msg) + if self._logger is not None: self._logger.error(msg) raise Exception(msg) @@ -124,7 +124,7 @@ class RestApiClient: except Exception as e: MSG = 'Request failed. method={:s} url={:s} body={:s}' msg = MSG.format(str(method.value).upper(), request_url, str(body)) - self._logger.exception(msg) + if self._logger is not None: self._logger.exception(msg) raise Exception(msg) from e self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) -- GitLab From d23c568f295178d5ba2934743cbe395cf6d87f93 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 08:28:03 +0000 Subject: [PATCH 055/367] Device component - ACTN Driver: - Added logger for RestConfClient instantiation --- src/device/service/drivers/ietf_actn/IetfActnDriver.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index c0c7a6f51..42b02ca40 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -46,6 +46,7 @@ class IetfActnDriver(_Driver): restconf_settings = copy.deepcopy(settings) restconf_settings.pop('base_url', None) restconf_settings.pop('import_topology', None) + restconf_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient') self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) self._handler_etht_service = EthtServiceHandler(self._rest_api_client) -- GitLab From 358b214dbfce3c5df5d46ce1fb0d35f13cb2fe99 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 08:28:36 +0000 Subject: [PATCH 056/367] ECOC F5GA Telemetry Demo: - Add example slice descriptor files --- .../data/slice1_post_ietf_network_slice.json | 190 ++++++++++++++++++ .../data/slice1_put_ietf_network_slice.json | 58 ++++++ 2 files changed, 248 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json new file mode 100644 index 000000000..ac1f09dd8 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json @@ -0,0 +1,190 @@ +{ + "network-slice-services": { + "slice-service": [ + { + "connection-groups": { + "connection-group": [ + { + "connectivity-construct": [ + { + "id": 1, + "p2p-receiver-sdp": "2", + "p2p-sender-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "bound": 10, + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds" + }, + { + 
"bound": 5000, + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": 0.001 + } + ] + } + } + }, + { + "id": 2, + "p2p-receiver-sdp": "1", + "p2p-sender-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "bound": 20, + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds" + }, + { + "bound": 1000, + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": 0.001 + } + ] + } + } + } + ], + "connectivity-type": "point-to-point", + "id": "line1" + } + ] + }, + "description": "dsc", + "id": "slice1", + "sdps": { + "sdp": [ + { + "attachment-circuits": { + "attachment-circuit": [ + { + "ac-node-id": "172.16.185.32", + "ac-tp-id": "200", + "description": "dsc", + "id": "0" + } + ] + }, + "id": "1", + "node-id": "172.16.185.32", + "sdp-ip-address": [ + "172.16.185.32" + ], + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": [ + { + "type": "ietf-network-slice-service:vlan", + "value": [ + "101" + ] + }, + { + "type": "ietf-network-slice-service:source-ip-prefix", + "value": [ + "172.1.101.22/24" + ] + }, + { + "type": "ietf-network-slice-service:source-tcp-port", + "value": [ + "10200" + ] + }, + { + "type": "ietf-network-slice-service:destination-ip-prefix", + "value": [ + "172.16.104.221/24" + ] + }, + { + "type": "ietf-network-slice-service:destination-tcp-port", + "value": [ + "10500" + ] + } + ], + "target-connection-group-id": "line1" + } + ] + } + }, + { + "attachment-circuits": { + "attachment-circuit": [ + { + "ac-node-id": "172.16.182.25", + "ac-tp-id": "200", + "description": "dsc", + "id": "0" + } + ] + }, + "id": "2", + "node-id": "172.16.182.25", + "sdp-ip-address": [ + "172.16.182.25" + ], + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": [ + { + "type": "ietf-network-slice-service:vlan", + "value": [ + "21" + ] + }, + { + "type": "ietf-network-slice-service:source-ip-prefix", + "value": [ + "172.16.104.221/24" + ] + }, + { + "type": "ietf-network-slice-service:source-tcp-port", + "value": [ + "10500" + ] + }, + { + "type": "ietf-network-slice-service:destination-ip-prefix", + "value": [ + "172.1.101.22/24" + ] + }, + { + "type": "ietf-network-slice-service:destination-tcp-port", + "value": [ + "10200" + ] + } + ], + "target-connection-group-id": "line1" + } + ] + } + } + ] + } + } + ] + } +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json new file mode 100644 index 000000000..690a84d91 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json @@ -0,0 +1,58 @@ +{ + "connectivity-construct": [ + { + "id": 1, + "p2p-receiver-sdp": "2", + "p2p-sender-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "bound": 10, + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds" + }, + { + "bound": 5000, + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + 
"percentile-value": 0.001 + } + ] + } + } + }, + { + "id": 2, + "p2p-receiver-sdp": "1", + "p2p-sender-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "bound": 20, + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds" + }, + { + "bound": 1000, + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": 0.001 + } + ] + } + } + } + ], + "connectivity-type": "point-to-point", + "id": "line1" + } -- GitLab From 5a4356d4670a86049872d0e06093ee135f085ce9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 09:36:58 +0000 Subject: [PATCH 057/367] SIMAP Connector: - Update SimapUpdater to skip management links and links connecting skipped devices --- .../service/simap_updater/SimapUpdater.py | 99 ++++++++++++++++++- 1 file changed, 97 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 456fd5901..97a9c056f 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -14,7 +14,7 @@ import logging, queue, threading -from typing import Any, Optional +from typing import Any, Optional, Set from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import DeviceEvent, Empty, LinkEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector @@ -51,6 +51,18 @@ class EventDispatcher(BaseEventDispatcher): ) self._simap_client = SimapClient(self._restconf_client) + self._skipped_devices : Set[str] = set() + + + def _add_skipped_device(self, device) -> None: + self._skipped_devices.add(device.device_id.device_uuid.uuid) + self._skipped_devices.add(device.name) + + + def _remove_skipped_device(self, device) -> None: + self._skipped_devices.discard(device.device_id.device_uuid.uuid) + self._skipped_devices.discard(device.name) + def dispatch(self, event : Any) -> None: MSG = 'Unexpected Event: {:s}' @@ -117,6 +129,7 @@ class EventDispatcher(BaseEventDispatcher): DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, } if device_type in SKIPPED_DEVICE_TYPES: + self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is of a skipped device type. ' 'SIMAP should be updated by him: {:s}' @@ -128,6 +141,7 @@ class EventDispatcher(BaseEventDispatcher): device_controller_uuid = device.controller_id.device_uuid.uuid if len(device_controller_uuid) > 0: + self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' 'SIMAP should be populated by remote controller: {:s}' @@ -139,6 +153,7 @@ class EventDispatcher(BaseEventDispatcher): topology_uuid, endpoint_names = get_device_endpoint(device) if topology_uuid is None: + self._add_skipped_device(device) MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' str_device_event = grpc_message_to_json_string(device_event) str_device = grpc_message_to_json_string(device) @@ -153,6 +168,7 @@ class EventDispatcher(BaseEventDispatcher): device_name = device.name te_topo.node(device_name).create(termination_point_ids=endpoint_names) + self._remove_skipped_device(device) MSG = 'Device Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -173,6 +189,7 @@ class EventDispatcher(BaseEventDispatcher): DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, } if device_type in SKIPPED_DEVICE_TYPES: + self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is of a skipped device type. ' 'SIMAP should be updated by him: {:s}' @@ -184,6 +201,7 @@ class EventDispatcher(BaseEventDispatcher): device_controller_uuid = device.controller_id.device_uuid.uuid if len(device_controller_uuid) > 0: + self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is a remotely-managed device. ' 'SIMAP should be updated by remote controller: {:s}' @@ -195,6 +213,7 @@ class EventDispatcher(BaseEventDispatcher): topology_uuid, endpoint_names = get_device_endpoint(device) if topology_uuid is None: + self._add_skipped_device(device) MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' str_device_event = grpc_message_to_json_string(device_event) str_device = grpc_message_to_json_string(device) @@ -214,6 +233,8 @@ class EventDispatcher(BaseEventDispatcher): for endpoint_name in endpoint_names: te_device.termination_point(endpoint_name).update() + self._remove_skipped_device(device) + MSG = 'Device Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -233,6 +254,7 @@ class EventDispatcher(BaseEventDispatcher): DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, } if device_type in SKIPPED_DEVICE_TYPES: + self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is of a skipped device type. ' 'SIMAP should be updated by him: {:s}' @@ -244,6 +266,7 @@ class EventDispatcher(BaseEventDispatcher): device_controller_uuid = device.controller_id.device_uuid.uuid if len(device_controller_uuid) > 0: + self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' 'SIMAP should be updated by remote controller: {:s}' @@ -274,6 +297,7 @@ class EventDispatcher(BaseEventDispatcher): te_device.delete() + self._remove_skipped_device(device) self._object_cache.delete(CachedEntities.DEVICE, device_uuid) self._object_cache.delete(CachedEntities.DEVICE, device_name) @@ -308,6 +332,28 @@ class EventDispatcher(BaseEventDispatcher): dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + # Skip links that connect two management endpoints + if src_endpoint is not None and dst_endpoint is not None: + if str(src_endpoint.name).lower() == 'mgmt' and str(dst_endpoint.name).lower() == 'mgmt': + MSG = 'LinkEvent({:s}) skipped, connects two management endpoints: {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + + # Skip links that connect to devices previously marked as skipped + src_uuid = src_device.device_id.device_uuid.uuid + dst_uuid = dst_device.device_id.device_uuid.uuid + src_name = src_device.name + dst_name = dst_device.name + if (src_uuid in self._skipped_devices or src_name in self._skipped_devices + or dst_uuid in self._skipped_devices or dst_name in self._skipped_devices): + MSG = 'LinkEvent({:s}) skipped, connects to skipped device(s): {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + try: if src_device is None: MSG = 'Device({:s}) not found in cache' @@ -357,6 +403,28 @@ class EventDispatcher(BaseEventDispatcher): dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + # Skip links that connect two management endpoints + if src_endpoint is not None and dst_endpoint is not None: + if str(src_endpoint.name).lower() == 'mgmt' and str(dst_endpoint.name).lower() == 'mgmt': + MSG = 'LinkEvent({:s}) skipped, connects two management endpoints: {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + + # Skip links that connect to devices previously marked as skipped + src_uuid = src_device.device_id.device_uuid.uuid + dst_uuid = dst_device.device_id.device_uuid.uuid + src_name = src_device.name + dst_name = dst_device.name + if (src_uuid in self._skipped_devices or src_name in self._skipped_devices + or dst_uuid in self._skipped_devices or dst_name in self._skipped_devices): + MSG = 'LinkEvent({:s}) skipped, connects to skipped device(s): {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + try: if src_device is None: MSG = 'Device({:s}) not found in cache' @@ -388,13 +456,40 @@ class EventDispatcher(BaseEventDispatcher): link = self._object_cache.get(CachedEntities.LINK, link_uuid) link_name = link.name - topology_uuid, _ = get_link_endpoint(link) + topology_uuid, endpoint_uuids = get_link_endpoint(link) topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name te_topo = self._simap_client.network(topology_name) 
te_topo.update() + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + # Skip links that connect two management endpoints + if src_endpoint is not None and dst_endpoint is not None: + if str(src_endpoint.name).lower() == 'mgmt' and str(dst_endpoint.name).lower() == 'mgmt': + MSG = 'LinkEvent({:s}) skipped, connects two management endpoints: {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + + # Skip links that connect to devices previously marked as skipped + src_uuid = src_device.device_id.device_uuid.uuid + dst_uuid = dst_device.device_id.device_uuid.uuid + src_name = src_device.name + dst_name = dst_device.name + if (src_uuid in self._skipped_devices or src_name in self._skipped_devices + or dst_uuid in self._skipped_devices or dst_name in self._skipped_devices): + MSG = 'LinkEvent({:s}) skipped, connects to skipped device(s): {:s}' + str_link_event = grpc_message_to_json_string(link_event) + str_link = grpc_message_to_json_string(link) + LOGGER.warning(MSG.format(str_link_event, str_link)) + return + te_link = te_topo.link(link_name) te_link.delete() -- GitLab From b2df759524a4dee5e5ccf4371bd1b82af771b030 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 10:39:39 +0000 Subject: [PATCH 058/367] Events: - Add new AnyEvent message and GetAllEvents() RPC method. 
- Add methods in Context client and server - Adapt Simap Updater --- proto/context.proto | 15 ++++++- src/context/client/ContextClient.py | 9 +++- .../service/ContextServiceServicerImpl.py | 9 +++- src/context/service/database/Events.py | 29 ++++++++++++- .../service/simap_updater/SimapUpdater.py | 43 +++++++++++++++---- 5 files changed, 91 insertions(+), 14 deletions(-) diff --git a/proto/context.proto b/proto/context.proto index 0fdb64405..4e5277438 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -78,7 +78,8 @@ service ContextService { rpc RemoveConnection (ConnectionId ) returns ( Empty ) {} rpc GetConnectionEvents(Empty ) returns (stream ConnectionEvent ) {} - + rpc GetAllEvents (Empty ) returns (stream AnyEvent ) {} + // ------------------------------ Experimental ----------------------------- rpc GetOpticalConfig (Empty ) returns (OpticalConfigList) {} rpc SetOpticalConfig (OpticalConfig ) returns (OpticalConfigId ) {} @@ -118,6 +119,18 @@ message Event { EventTypeEnum event_type = 2; } +message AnyEvent { + oneof event { + ContextEvent context = 1; + TopologyEvent topology = 2; + DeviceEvent device = 3; + LinkEvent link = 4; + ServiceEvent service = 5; + SliceEvent slice = 6; + ConnectionEvent connection = 7; + } +} + // ----- Context ------------------------------------------------------------------------------------------------------- message ContextId { Uuid context_uuid = 1; diff --git a/src/context/client/ContextClient.py b/src/context/client/ContextClient.py index 565be7fef..cb6f9ca3e 100644 --- a/src/context/client/ContextClient.py +++ b/src/context/client/ContextClient.py @@ -19,7 +19,7 @@ from common.Settings import get_service_host, get_service_port_grpc from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, + AnyEvent, Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList, Empty, EndPointIdList, EndPointNameList, @@ -61,6 +61,13 @@ class ContextClient: self.stub = None self.policy_stub = None + @RETRY_DECORATOR + def GetAllEvents(self, request: Empty) -> Iterator[AnyEvent]: + LOGGER.debug('GetAllEvents request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.GetAllEvents(request) + LOGGER.debug('GetAllEvents result: {:s}'.format(grpc_message_to_json_string(response))) + return response + @RETRY_DECORATOR def ListContextIds(self, request: Empty) -> ContextIdList: LOGGER.debug('ListContextIds request: {:s}'.format(grpc_message_to_json_string(request))) diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py index 73dc32bd0..2873c598e 100644 --- a/src/context/service/ContextServiceServicerImpl.py +++ b/src/context/service/ContextServiceServicerImpl.py @@ -16,7 +16,7 @@ import grpc, logging, sqlalchemy from typing import Iterator from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import ( - Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, + AnyEvent, Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList, Context, ContextEvent, ContextId, ContextIdList, ContextList, Device, DeviceEvent, DeviceFilter, DeviceId, DeviceIdList, DeviceList, Empty, 
EndPointIdList, EndPointNameList, @@ -41,7 +41,7 @@ from .database.Device import ( device_delete, device_get, device_list_ids, device_list_objs, device_select, device_set ) from .database.EndPoint import endpoint_list_names -from .database.Events import EventTopicEnum, consume_events +from .database.Events import EventTopicEnum, consume_all_events, consume_events from .database.Link import ( link_delete, link_get, link_list_ids, link_list_objs, link_set ) @@ -83,6 +83,11 @@ class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceSer def _get_metrics(self) -> MetricsPool: return METRICS_POOL + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def GetAllEvents(self, request : Empty, context : grpc.ServicerContext) -> Iterator[AnyEvent]: + for message in consume_all_events(self.messagebroker): yield message + + # ----- Context ---------------------------------------------------------------------------------------------------- @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/context/service/database/Events.py b/src/context/service/database/Events.py index decd7f005..398352e20 100644 --- a/src/context/service/database/Events.py +++ b/src/context/service/database/Events.py @@ -17,7 +17,7 @@ from typing import Dict, Iterator, Set from common.message_broker.Message import Message from common.message_broker.MessageBroker import MessageBroker from common.proto.context_pb2 import ( - ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, + AnyEvent, ConnectionEvent, ContextEvent, DeviceEvent, EventTypeEnum, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent, OpticalConfigEvent ) @@ -130,3 +130,30 @@ def consume_events( MSG = 'Unable to identify EventClass for Message({:s}). Ignoring...' LOGGER.warning(MSG.format(str(message))) continue + +def consume_all_events( + messagebroker : MessageBroker, consume_timeout : float = CONSUME_TIMEOUT +) -> Iterator[AnyEvent]: + topic_names = [EventTopicEnum.ALL.value] + for message in messagebroker.consume(topic_names, consume_timeout=consume_timeout): + data = json.loads(message.content) + if 'context_id' in data: + yield AnyEvent(context=data) + elif 'topology_id' in data: + yield AnyEvent(topology=data) + elif 'device_id' in data: + yield AnyEvent(device=data) + elif 'opticalconfig_id' in data: + yield AnyEvent(device=data) + elif 'link_id' in data: + yield AnyEvent(link=data) + elif 'service_id' in data: + yield AnyEvent(service=data) + elif 'slice_id' in data: + yield AnyEvent(slice=data) + elif 'connection_id' in data: + yield AnyEvent(connection=data) + else: + MSG = 'Unable to identify EventClass for Message({:s}). Ignoring...' 
+ LOGGER.warning(MSG.format(str(message))) + continue diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 97a9c056f..fbbf53f98 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -14,7 +14,7 @@ import logging, queue, threading -from typing import Any, Optional, Set +from typing import Any, Callable, Optional, Set from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import DeviceEvent, Empty, LinkEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector @@ -28,7 +28,7 @@ from simap_connector.Config import ( from .simap_client.RestConfClient import RestConfClient from .simap_client.SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache -from .Tools import get_device_endpoint, get_link_endpoint +from .Tools import EventTypeEnum, get_device_endpoint, get_link_endpoint LOGGER = logging.getLogger(__name__) @@ -68,6 +68,28 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Unexpected Event: {:s}' LOGGER.warning(MSG.format(grpc_message_to_json_string(event))) + # overwrite default _get_dispatcher() method + def _get_dispatcher(self, event : Any) -> Optional[Callable]: + object_name = str(event.__class__.__name__).lower().replace('event', '') + if object_name == 'any': + object_name = event.WhichOneof('event') + event = getattr(event, object_name) + + event_type = EventTypeEnum.Name(event.event.event_type).lower().replace('eventtype_', '') + + method_name = 'dispatch_{:s}_{:s}'.format(object_name, event_type) + dispatcher = getattr(self, method_name, None) + if dispatcher is not None: return dispatcher + + method_name = 'dispatch_{:s}'.format(object_name) + dispatcher = getattr(self, method_name, None) + if dispatcher is not None: return dispatcher + + method_name = 'dispatch' + dispatcher = getattr(self, method_name, None) + if dispatcher is not None: return dispatcher + + return None def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' @@ -506,14 +528,17 @@ class SimapUpdater: self._event_collector = BaseEventCollector(terminate=terminate) self._event_collector.install_collector( - self._context_client.GetTopologyEvents, Empty(), log_events_received=True - ) - self._event_collector.install_collector( - self._context_client.GetDeviceEvents, Empty(), log_events_received=True - ) - self._event_collector.install_collector( - self._context_client.GetLinkEvents, Empty(), log_events_received=True + self._context_client.GetAllEvents, Empty(), log_events_received=True ) + #self._event_collector.install_collector( + # self._context_client.GetTopologyEvents, Empty(), log_events_received=True + #) + #self._event_collector.install_collector( + # self._context_client.GetDeviceEvents, Empty(), log_events_received=True + #) + #self._event_collector.install_collector( + # self._context_client.GetLinkEvents, Empty(), log_events_received=True + #) self._event_dispatcher = EventDispatcher( self._event_collector.get_events_queue(), self._context_client, -- GitLab From 1fe46ccdcdf617a581a324191b143ea6a90d2f04 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 11:13:43 +0000 Subject: [PATCH 059/367] Events: - Correct extraction of timestamp --- src/common/tools/grpc/BaseEventCollector.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git 
a/src/common/tools/grpc/BaseEventCollector.py b/src/common/tools/grpc/BaseEventCollector.py index 1cf370426..637fa5979 100644 --- a/src/common/tools/grpc/BaseEventCollector.py +++ b/src/common/tools/grpc/BaseEventCollector.py @@ -47,7 +47,13 @@ class CollectorThread(threading.Thread): if self._log_events_received: str_event = grpc_message_to_json_string(event) LOGGER.info('[_collect] event: {:s}'.format(str_event)) - timestamp = event.event.timestamp.timestamp + object_name = str(event.__class__.__name__).lower().replace('event', '') + if object_name == 'any': + object_name = event.WhichOneof('event') + _event = getattr(event, object_name) + timestamp = _event.event.timestamp.timestamp + else: + timestamp = event.event.timestamp.timestamp self._events_queue.put_nowait((timestamp, event)) except grpc.RpcError as e: if e.code() == grpc.StatusCode.UNAVAILABLE: # pylint: disable=no-member -- GitLab From 5d5db486d7cbd903da8dbd4da8b10c6d27a7eb33 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 11:20:26 +0000 Subject: [PATCH 060/367] ECOC F5GA Telemetry Demo: - Updated TCP ports in topology descriptors --- src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json | 2 +- src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json | 2 +- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index 77f9aab53..3a067d4bf 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -20,7 +20,7 @@ "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8444"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify_certs": false, "import_topology": "topology" diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json index d5d314989..e11ccd10d 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -20,7 +20,7 @@ "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8443" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 811d1471c..4c5959a5d 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -42,8 +42,8 @@ case "$HOSTNAME" in echo "Deploying support 
services..." docker run --detach --name simap-server --publish 8080:8080 simap-server:mock - docker run --detach --name nce-fan-ctrl --publish 8443:8443 nce-fan-ctrl:mock - docker run --detach --name nce-t-ctrl --publish 8444:8443 nce-t-ctrl:mock + docker run --detach --name nce-fan-ctrl --publish 8080:8081 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8080:8082 nce-t-ctrl:mock sleep 2 docker ps -a -- GitLab From 4b6c72b0babe78dc0504f1e75b7613f6f1727ddd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 11:21:58 +0000 Subject: [PATCH 061/367] ECOC F5GA Telemetry Demo: - Added symbolic link for simplicity --- f5ga | 1 + 1 file changed, 1 insertion(+) create mode 120000 f5ga diff --git a/f5ga b/f5ga new file mode 120000 index 000000000..f38bc47e9 --- /dev/null +++ b/f5ga @@ -0,0 +1 @@ +src/tests/ecoc25-f5ga-telemetry/ \ No newline at end of file -- GitLab From 15c881a3770df01263a80d30deafaa7b27a50574 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 11:27:39 +0000 Subject: [PATCH 062/367] ECOC F5GA Telemetry Demo: - Corrected TCP ports in redeploy script --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 4c5959a5d..2a0dd0736 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -42,8 +42,8 @@ case "$HOSTNAME" in echo "Deploying support services..." docker run --detach --name simap-server --publish 8080:8080 simap-server:mock - docker run --detach --name nce-fan-ctrl --publish 8080:8081 nce-fan-ctrl:mock - docker run --detach --name nce-t-ctrl --publish 8080:8082 nce-t-ctrl:mock + docker run --detach --name nce-fan-ctrl --publish 8081:8080 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 nce-t-ctrl:mock sleep 2 docker ps -a -- GitLab From 1fdeb6c6200796203b82a0a9427f60cc027221c0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 11:46:01 +0000 Subject: [PATCH 063/367] SIMAP Connector: - Fix SimapUpdater AnyEvent dispatching --- .../service/simap_updater/SimapUpdater.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index fbbf53f98..d27c88b6c 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -16,7 +16,7 @@ import logging, queue, threading from typing import Any, Callable, Optional, Set from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import DeviceEvent, Empty, LinkEvent, TopologyEvent +from common.proto.context_pb2 import DeviceEvent, Empty, EventTypeEnum, LinkEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string @@ -28,7 +28,7 @@ from simap_connector.Config import ( from .simap_client.RestConfClient import RestConfClient from .simap_client.SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache -from .Tools import EventTypeEnum, get_device_endpoint, get_link_endpoint +from .Tools import get_device_endpoint, get_link_endpoint LOGGER = logging.getLogger(__name__) @@ -70,12 +70,19 @@ class EventDispatcher(BaseEventDispatcher): # 
overwrite default _get_dispatcher() method def _get_dispatcher(self, event : Any) -> Optional[Callable]: + LOGGER.info('event[1]={:s}'.format(grpc_message_to_json_string(event))) object_name = str(event.__class__.__name__).lower().replace('event', '') + LOGGER.info('object_name[1]={:s}'.format(str(object_name))) if object_name == 'any': object_name = event.WhichOneof('event') + LOGGER.info('object_name[2]={:s}'.format(str(object_name))) event = getattr(event, object_name) + LOGGER.info('event[2]={:s}'.format(grpc_message_to_json_string(event))) - event_type = EventTypeEnum.Name(event.event.event_type).lower().replace('eventtype_', '') + event_type = event.event.event_type + LOGGER.info('event_type[1]={:s}'.format(str(event_type))) + event_type = EventTypeEnum.Name(event_type).lower().replace('eventtype_', '') + LOGGER.info('event_type[2]={:s}'.format(str(event_type))) method_name = 'dispatch_{:s}_{:s}'.format(object_name, event_type) dispatcher = getattr(self, method_name, None) -- GitLab From 1498f2b1642f7ac8ab2ad588e3ec84e6077ff852 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 13:30:44 +0000 Subject: [PATCH 064/367] Events: - Correct dispatcher and runner of BaseEventDispatcher - Adapt Simap Updater --- src/common/tools/grpc/BaseEventDispatcher.py | 14 ++++++++- .../service/simap_updater/SimapUpdater.py | 29 ------------------- 2 files changed, 13 insertions(+), 30 deletions(-) diff --git a/src/common/tools/grpc/BaseEventDispatcher.py b/src/common/tools/grpc/BaseEventDispatcher.py index e654b4c73..dc4d7a0a8 100644 --- a/src/common/tools/grpc/BaseEventDispatcher.py +++ b/src/common/tools/grpc/BaseEventDispatcher.py @@ -44,7 +44,9 @@ class BaseEventDispatcher(threading.Thread): def _get_dispatcher(self, event : Any) -> Optional[Callable]: object_name = str(event.__class__.__name__).lower().replace('event', '') - event_type = EventTypeEnum.Name(event.event.event_type).lower().replace('eventtype_', '') + event_type = event.event.event_type + event_type = EventTypeEnum.Name(event_type) + event_type = event_type.lower().replace('eventtype_', '') method_name = 'dispatch_{:s}_{:s}'.format(object_name, event_type) dispatcher = getattr(self, method_name, None) @@ -65,6 +67,16 @@ class BaseEventDispatcher(threading.Thread): event = self._get_event() if event is None: continue + object_name = str(event.__class__.__name__) + object_name = object_name.lower().replace('event', '') + if object_name == 'any': + field_name = event.WhichOneof('event') + event = getattr(event, field_name) + + event_type = event.event.event_type + event_type = EventTypeEnum.Name(event_type) + event_type = event_type.lower().replace('eventtype_', '') + dispatcher = self._get_dispatcher(event) if dispatcher is None: MSG = 'No dispatcher available for Event({:s})' diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index d27c88b6c..c44702e13 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -68,35 +68,6 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Unexpected Event: {:s}' LOGGER.warning(MSG.format(grpc_message_to_json_string(event))) - # overwrite default _get_dispatcher() method - def _get_dispatcher(self, event : Any) -> Optional[Callable]: - LOGGER.info('event[1]={:s}'.format(grpc_message_to_json_string(event))) - object_name = str(event.__class__.__name__).lower().replace('event', '') - 
LOGGER.info('object_name[1]={:s}'.format(str(object_name))) - if object_name == 'any': - object_name = event.WhichOneof('event') - LOGGER.info('object_name[2]={:s}'.format(str(object_name))) - event = getattr(event, object_name) - LOGGER.info('event[2]={:s}'.format(grpc_message_to_json_string(event))) - - event_type = event.event.event_type - LOGGER.info('event_type[1]={:s}'.format(str(event_type))) - event_type = EventTypeEnum.Name(event_type).lower().replace('eventtype_', '') - LOGGER.info('event_type[2]={:s}'.format(str(event_type))) - - method_name = 'dispatch_{:s}_{:s}'.format(object_name, event_type) - dispatcher = getattr(self, method_name, None) - if dispatcher is not None: return dispatcher - - method_name = 'dispatch_{:s}'.format(object_name) - dispatcher = getattr(self, method_name, None) - if dispatcher is not None: return dispatcher - - method_name = 'dispatch' - dispatcher = getattr(self, method_name, None) - if dispatcher is not None: return dispatcher - - return None def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' -- GitLab From 3b53983b78c9daa3bbe476e3cc881020ae7d7429 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 13:33:06 +0000 Subject: [PATCH 065/367] ECOC F5GA Telemetry Demo: - Corrected scheme in topology descriptors --- src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json | 2 +- src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json index 3a067d4bf..7a961d3e7 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json @@ -22,7 +22,7 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { - "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json index e11ccd10d..81d107031 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json @@ -22,7 +22,7 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { - "scheme": "https", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", "timeout": 120, "verify": false }}} ]}} -- GitLab From 74384a73804a69f94fea36fb28b7de329483af81 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 1 Sep 2025 17:44:00 +0000 Subject: [PATCH 066/367] Tests - Tools - Mock NCE-T Controller - Corrected xPath=>restconf paths --- .../mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py | 123 +++++++++++++++--- 
.../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 4 +- 2 files changed, 109 insertions(+), 18 deletions(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py index 76e5ae6c2..dae7b6d91 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py @@ -14,23 +14,22 @@ import json, libyang, logging -from typing import Dict, List, Optional, Type +from typing import Dict, List, Optional, Set LOGGER = logging.getLogger(__name__) -def walk_schema(node : libyang.SNode, path : str = '') -> Dict[str, Type]: - schema_paths : Dict[str, Type] = dict() +def walk_schema(node : libyang.SNode, path : str = '') -> Set[str]: current_path = f'{path}/{node.name()}' - schema_paths[current_path] = type(node) + schema_paths : Set[str] = {current_path} for child in node.children(): if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue schema_paths.update(walk_schema(child, current_path)) return schema_paths -def extract_schema_paths(yang_module : libyang.Module) -> Dict[str, Type]: - schema_paths : Dict[str, Type] = dict() +def extract_schema_paths(yang_module : libyang.Module) -> Set[str]: + schema_paths : Set[str] = set() for node in yang_module.children(): schema_paths.update(walk_schema(node)) return schema_paths @@ -41,14 +40,14 @@ class YangHandler: yang_startup_data : Dict ) -> None: self._yang_context = libyang.Context(yang_search_path) - self._loaded_modules = set() - self._yang_module_paths : Dict[str, Type] = dict() + self._loaded_modules : Set[str] = set() + self._schema_paths : Set[str] = set() for yang_module_name in yang_module_names: LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) yang_module = self._yang_context.load_module(yang_module_name) yang_module.feature_enable_all() self._loaded_modules.add(yang_module_name) - self._yang_module_paths.update(extract_schema_paths(yang_module)) + self._schema_paths.update(extract_schema_paths(yang_module)) self._datastore = self._yang_context.parse_data_mem( json.dumps(yang_startup_data), fmt='json' @@ -57,11 +56,11 @@ class YangHandler: def destroy(self) -> None: self._yang_context.destroy() - def get_module_paths(self) -> Dict[str, Type]: - return self._yang_module_paths + def get_schema_paths(self) -> Set[str]: + return self._schema_paths def get(self, path : str) -> Optional[str]: - if not path.startswith('/'): path = '/' + path + path = self._normalize_path(path) data = self._datastore.find_path(path) if data is None: return None json_data = data.print_mem( @@ -71,7 +70,7 @@ class YangHandler: return json_data def get_xpath(self, xpath : str) -> List[str]: - if not path.startswith('/'): path = '/' + path + if not xpath.startswith('/'): xpath = '/' + xpath nodes = self._datastore.find_all(xpath) result = list() for node in nodes: @@ -82,7 +81,7 @@ class YangHandler: return result def create(self, path : str, payload : Dict) -> str: - if not path.startswith('/'): path = '/' + path + path = self._normalize_path(path) # TODO: client should not provide identifier of element to be created, add it to subpath dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( json.dumps(payload), 'json', strict=True, parse_only=False, @@ -103,7 +102,7 @@ class YangHandler: return json_data def update(self, path : str, payload : Dict) -> str: - if not path.startswith('/'): path = '/' + path + path = self._normalize_path(path) # NOTE: client should provide identifier of element 
to be updated dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( json.dumps(payload), 'json', strict=True, parse_only=False, @@ -124,7 +123,7 @@ class YangHandler: return json_data def delete(self, path : str) -> Optional[str]: - if not path.startswith('/'): path = '/' + path + path = self._normalize_path(path) # NOTE: client should provide identifier of element to be deleted @@ -142,3 +141,95 @@ class YangHandler: node.free() return json_data + + def _normalize_path(self, path : str) -> str: + """ + Normalize RESTCONF path segments using the standard `list=` + syntax into the libyang bracketed predicate form expected by + the datastore (e.g. `network="admin"` -> `network[network-id="admin"]`). + + This implementation looks up the schema node for the list and + uses its key leaf names to build the proper predicates. If the + schema information is unavailable, it falls back to using the + list name as the key name. + """ + + parts = [p for p in path.strip('/').split('/') if p != ''] + schema_path = '' + out_parts: List[str] = [] + + for part in parts: + if '=' in part: + # split into name and value (value may contain commas/quotes) + name, val = part.split('=', 1) + # keep original name (may include prefix) for output, but + # use local name (without module prefix) to lookup schema + local_name = name.split(':', 1)[1] if ':' in name else name + schema_path = schema_path + '/' + local_name if schema_path else '/' + local_name + LOGGER.info('[_normalize_path] schema_path={:s}'.format(str(schema_path))) + schema_nodes = list(self._yang_context.find_path(schema_path)) + if len(schema_nodes) != 1: + MSG = 'No/Multiple SchemaNodes({:s}) for SchemaPath({:s})' + raise Exception(MSG.format( + str([repr(sn) for sn in schema_nodes]), schema_path + )) + schema_node = schema_nodes[0] + LOGGER.info('[_normalize_path] schema_node={:s}'.format(str(repr(schema_node)))) + + # parse values splitting on commas outside quotes + values = [] + cur = '' + in_quotes = False + for ch in val: + if ch == '"': + in_quotes = not in_quotes + cur += ch + elif ch == ',' and not in_quotes: + values.append(cur) + cur = '' + else: + cur += ch + if cur != '': + values.append(cur) + + # determine key names from schema_node if possible + key_names = None + if isinstance(schema_node, libyang.SList): + key_names = [k.name() for k in schema_node.keys()] + LOGGER.info('[_normalize_path] [SList] key_names={:s}'.format(str(key_names))) + #if isinstance(keys, (list, tuple)): + # key_names = keys + # LOGGER.info('[_normalize_path] key_names={:s}'.format(str(key_names))) + #elif isinstance(keys, str): + # key_names = [kn for kn in k.split() if kn] + # LOGGER.info('[_normalize_path] 1 key_names={:s}'.format(str(key_names))) + #else: + # MSG = 'Unsupported keys format: {:s} / {:s}' + # raise Exception(MSG.format(str(type(keys)), str(keys))) + #elif hasattr(schema_node, 'key'): + # LOGGER.info('[_normalize_path] has key') + # k = schema_node.key() + # LOGGER.info('[_normalize_path] k={:s}'.format(str(k))) + # if isinstance(k, str): + # key_names = [kn for kn in k.split() if kn] + # LOGGER.info('[_normalize_path] 3 key_names={:s}'.format(str(key_names))) + + if not key_names: + # fallback: use the local list name as the single key + key_names = [local_name] + + LOGGER.info('[_normalize_path] 5 key_names={:s}'.format(str(key_names))) + + # build predicate(s) + preds = [] + for idx, kn in enumerate(key_names): + kv = values[idx] if idx < len(values) else values[0] + preds.append(f'[{kn}="{kv}"]') + + 
out_parts.append(name + ''.join(preds)) + else: + local_part = part.split(':', 1)[1] if ':' in part else part + schema_path = schema_path + '/' + local_part if schema_path else '/' + local_part + out_parts.append(part) + + return '/' + '/'.join(out_parts) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 48ac8061e..d839d2179 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -46,7 +46,7 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: yang_handler = YangHandler( YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA ) -restconf_paths = yang_handler.get_module_paths() +restconf_paths = yang_handler.get_schema_paths() app = Flask(__name__) app.config['SECRET_KEY'] = SECRET_KEY @@ -66,5 +66,5 @@ api.add_resource( ) LOGGER.info('Available RESTCONF paths:') -for restconf_path in restconf_paths: +for restconf_path in sorted(restconf_paths): LOGGER.info('- {:s}'.format(str(restconf_path))) -- GitLab From 46b973bd5cdc1a84b984f7471cd7cafb2f2371f4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 09:48:05 +0000 Subject: [PATCH 067/367] Tests - Tools - Mock NCE-T Controller - Corrected xPath=>restconf paths --- .../mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py | 26 +++++-------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py index dae7b6d91..eb92e012d 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py @@ -64,19 +64,19 @@ class YangHandler: data = self._datastore.find_path(path) if data is None: return None json_data = data.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True + fmt='json', with_siblings=False, pretty=True, + keep_empty_containers=False, include_implicit_defaults=True ) return json_data def get_xpath(self, xpath : str) -> List[str]: if not xpath.startswith('/'): xpath = '/' + xpath - nodes = self._datastore.find_all(xpath) + items = self._datastore.find_all(xpath) result = list() - for node in nodes: - result.append(node.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True + for item in items: + result.append(item.print_mem( + fmt='json', with_siblings=False, pretty=True, + keep_empty_containers=False, include_implicit_defaults=True )) return result @@ -88,7 +88,6 @@ class YangHandler: validate_present=True, validate_multi_error=True ) if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( path, parent=self._datastore, value=dnode_parsed, update=False @@ -109,7 +108,6 @@ class YangHandler: validate_present=True, validate_multi_error=True ) if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) dnode = self._yang_context.create_data_path( path, parent=self._datastore, value=dnode_parsed, update=True @@ -166,7 +164,6 @@ class YangHandler: # use local name (without module prefix) to lookup schema local_name = name.split(':', 1)[1] if ':' in name else name schema_path = schema_path + '/' + 
local_name if schema_path else '/' + local_name - LOGGER.info('[_normalize_path] schema_path={:s}'.format(str(schema_path))) schema_nodes = list(self._yang_context.find_path(schema_path)) if len(schema_nodes) != 1: MSG = 'No/Multiple SchemaNodes({:s}) for SchemaPath({:s})' @@ -174,7 +171,6 @@ class YangHandler: str([repr(sn) for sn in schema_nodes]), schema_path )) schema_node = schema_nodes[0] - LOGGER.info('[_normalize_path] schema_node={:s}'.format(str(repr(schema_node)))) # parse values splitting on commas outside quotes values = [] @@ -196,29 +192,21 @@ class YangHandler: key_names = None if isinstance(schema_node, libyang.SList): key_names = [k.name() for k in schema_node.keys()] - LOGGER.info('[_normalize_path] [SList] key_names={:s}'.format(str(key_names))) #if isinstance(keys, (list, tuple)): # key_names = keys - # LOGGER.info('[_normalize_path] key_names={:s}'.format(str(key_names))) #elif isinstance(keys, str): # key_names = [kn for kn in k.split() if kn] - # LOGGER.info('[_normalize_path] 1 key_names={:s}'.format(str(key_names))) #else: # MSG = 'Unsupported keys format: {:s} / {:s}' # raise Exception(MSG.format(str(type(keys)), str(keys))) #elif hasattr(schema_node, 'key'): - # LOGGER.info('[_normalize_path] has key') # k = schema_node.key() - # LOGGER.info('[_normalize_path] k={:s}'.format(str(k))) # if isinstance(k, str): # key_names = [kn for kn in k.split() if kn] - # LOGGER.info('[_normalize_path] 3 key_names={:s}'.format(str(key_names))) if not key_names: # fallback: use the local list name as the single key key_names = [local_name] - - LOGGER.info('[_normalize_path] 5 key_names={:s}'.format(str(key_names))) # build predicate(s) preds = [] -- GitLab From cee2767c2ed6c2ef16e645d2fe064d03c7207c56 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 10:49:10 +0000 Subject: [PATCH 068/367] SIMAP Connector - SimapUpdater: - Skip context events - Polish code --- .../service/simap_updater/SimapUpdater.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index c44702e13..56413cdc1 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -14,9 +14,9 @@ import logging, queue, threading -from typing import Any, Callable, Optional, Set +from typing import Any, Optional, Set from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import DeviceEvent, Empty, EventTypeEnum, LinkEvent, TopologyEvent +from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string @@ -69,6 +69,11 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning(MSG.format(grpc_message_to_json_string(event))) + def dispatch_context(self, context_event : ContextEvent) -> None: + MSG = 'Skipping Context Event: {:s}' + LOGGER.debug(MSG.format(grpc_message_to_json_string(context_event))) + + def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) -- GitLab From 625cd8093f4128290c0fecf8dbaa6c1fd03ae4e4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 11:12:57 +0000 Subject: [PATCH 069/367] ECOC 
F5GA Telemetry Demo: - Updated structure of data files --- ...st_connection_group_to_network_slice1.json | 62 ++++++ ...post_match_criteria_to_sdp1_in_slice1.json | 40 ++++ .../data/slice1/post_network_slice1.json | 188 +++++++++++++++++ .../slice1/post_sdp_to_network_slice1.json | 61 ++++++ .../data/slice1_post_ietf_network_slice.json | 190 ------------------ .../data/slice1_put_ietf_network_slice.json | 58 ------ .../data/{ => topology}/topology-agg.json | 0 .../data/{ => topology}/topology-e2e.json | 0 .../data/{ => topology}/topology-ip.json | 0 9 files changed, 351 insertions(+), 248 deletions(-) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json rename src/tests/ecoc25-f5ga-telemetry/data/{ => topology}/topology-agg.json (100%) rename src/tests/ecoc25-f5ga-telemetry/data/{ => topology}/topology-e2e.json (100%) rename src/tests/ecoc25-f5ga-telemetry/data/{ => topology}/topology-ip.json (100%) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json new file mode 100644 index 000000000..d39a837bd --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json @@ -0,0 +1,62 @@ +{ + "connection-group": [ + { + "id": "line2", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "3", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "3", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json new file mode 100644 index 000000000..16a36d45b --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json @@ -0,0 +1,40 @@ +{ + "match-criterion": [ + { + "index": 2, + "match-type": [ + { + "type": "ietf-network-slice-service:vlan", + "value": [ + "101" + ] + }, + { + "type": 
"ietf-network-slice-service:source-ip-prefix", + "value": [ + "172.1.101.22/24" + ] + }, + { + "type": "ietf-network-slice-service:source-tcp-port", + "value": [ + "10200" + ] + }, + { + "type": "ietf-network-slice-service:destination-ip-prefix", + "value": [ + "172.16.104.222/24" + ] + }, + { + "type": "ietf-network-slice-service:destination-tcp-port", + "value": [ + "10500" + ] + } + ], + "target-connection-group-id": "line2" + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json new file mode 100644 index 000000000..e6e0ee90a --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json @@ -0,0 +1,188 @@ +{ + "slice-service": [ + { + "id": "slice1", + "description": "network slice 1, connect to VM1", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "172.16.204.220", + "sdp-ip-address": [ + "172.16.204.220" + ], + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": [ + { + "type": "ietf-network-slice-service:vlan", + "value": [ + "101" + ] + }, + { + "type": "ietf-network-slice-service:destination-ip-prefix", + "value": [ + "172.16.104.221/24" + ] + }, + { + "type": "ietf-network-slice-service:destination-tcp-port", + "value": [ + "10500" + ] + }, + { + "type": "ietf-network-slice-service:source-ip-prefix", + "value": [ + "172.1.101.22/24" + ] + }, + { + "type": "ietf-network-slice-service:source-tcp-port", + "value": [ + "10200" + ] + } + ], + "target-connection-group-id": "line1" + } + ] + }, + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "AC POP to VM1", + "description": "AC VM1 connected to POP", + "ac-node-id": "172.16.204.220", + "ac-tp-id": "200" + } + ] + } + }, + { + "id": "2", + "node-id": "172.16.61.10", + "sdp-ip-address": [ + "172.16.61.10" + ], + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": [ + { + "type": "ietf-network-slice-service:vlan", + "value": [ + "21" + ] + }, + { + "type": "ietf-network-slice-service:source-ip-prefix", + "value": [ + "172.16.104.221/24" + ] + }, + { + "type": "ietf-network-slice-service:source-tcp-port", + "value": [ + "10500" + ] + }, + { + "type": "ietf-network-slice-service:destination-ip-prefix", + "value": [ + "172.1.101.22/24" + ] + }, + { + "type": "ietf-network-slice-service:destination-tcp-port", + "value": [ + "10200" + ] + } + ], + "target-connection-group-id": "line1" + } + ] + }, + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "AC ONT", + "description": "AC connected to PC1", + "ac-node-id": "172.16.61.10", + "ac-tp-id": "200" + } + ] + } + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": 
"ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json new file mode 100644 index 000000000..bd3895fc4 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json @@ -0,0 +1,61 @@ +{ + "sdp": [ + { + "id": "3", + "node-id": "172.16.61.11", + "sdp-ip-address": [ + "172.16.61.11" + ], + "service-match-criteria": { + "match-criterion": [ + { + "index": 1, + "match-type": [ + { + "type": "ietf-network-slice-service:vlan", + "value": [ + "21" + ] + }, + { + "type": "ietf-network-slice-service:source-ip-prefix", + "value": [ + "172.16.104.222/24" + ] + }, + { + "type": "ietf-network-slice-service:source-tcp-port", + "value": [ + "10500" + ] + }, + { + "type": "ietf-network-slice-service:destination-ip-prefix", + "value": [ + "172.1.101.22/24" + ] + }, + { + "type": "ietf-network-slice-service:destination-tcp-port", + "value": [ + "10200" + ] + } + ], + "target-connection-group-id": "line2" + } + ] + }, + "attachment-circuits": { + "attachment-circuit": [ + { + "id": "AC ONT", + "description": "AC connected to PC2", + "ac-node-id": "172.16.61.11", + "ac-tp-id": "200" + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json deleted file mode 100644 index ac1f09dd8..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slice1_post_ietf_network_slice.json +++ /dev/null @@ -1,190 +0,0 @@ -{ - "network-slice-services": { - "slice-service": [ - { - "connection-groups": { - "connection-group": [ - { - "connectivity-construct": [ - { - "id": 1, - "p2p-receiver-sdp": "2", - "p2p-sender-sdp": "1", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "bound": 10, - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds" - }, - { - "bound": 5000, - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": 0.001 - } - ] - } - } - }, - { - "id": 2, - "p2p-receiver-sdp": "1", - "p2p-sender-sdp": "2", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "bound": 20, - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds" - }, - { - "bound": 1000, - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": 0.001 - } - ] - } - } - } - ], - "connectivity-type": "point-to-point", - "id": "line1" - } - ] - }, - "description": "dsc", - "id": "slice1", - "sdps": { - "sdp": [ - { - "attachment-circuits": { - "attachment-circuit": [ - { - "ac-node-id": "172.16.185.32", - "ac-tp-id": "200", - "description": "dsc", - "id": "0" - } - ] - }, - "id": "1", - "node-id": 
"172.16.185.32", - "sdp-ip-address": [ - "172.16.185.32" - ], - "service-match-criteria": { - "match-criterion": [ - { - "index": 1, - "match-type": [ - { - "type": "ietf-network-slice-service:vlan", - "value": [ - "101" - ] - }, - { - "type": "ietf-network-slice-service:source-ip-prefix", - "value": [ - "172.1.101.22/24" - ] - }, - { - "type": "ietf-network-slice-service:source-tcp-port", - "value": [ - "10200" - ] - }, - { - "type": "ietf-network-slice-service:destination-ip-prefix", - "value": [ - "172.16.104.221/24" - ] - }, - { - "type": "ietf-network-slice-service:destination-tcp-port", - "value": [ - "10500" - ] - } - ], - "target-connection-group-id": "line1" - } - ] - } - }, - { - "attachment-circuits": { - "attachment-circuit": [ - { - "ac-node-id": "172.16.182.25", - "ac-tp-id": "200", - "description": "dsc", - "id": "0" - } - ] - }, - "id": "2", - "node-id": "172.16.182.25", - "sdp-ip-address": [ - "172.16.182.25" - ], - "service-match-criteria": { - "match-criterion": [ - { - "index": 1, - "match-type": [ - { - "type": "ietf-network-slice-service:vlan", - "value": [ - "21" - ] - }, - { - "type": "ietf-network-slice-service:source-ip-prefix", - "value": [ - "172.16.104.221/24" - ] - }, - { - "type": "ietf-network-slice-service:source-tcp-port", - "value": [ - "10500" - ] - }, - { - "type": "ietf-network-slice-service:destination-ip-prefix", - "value": [ - "172.1.101.22/24" - ] - }, - { - "type": "ietf-network-slice-service:destination-tcp-port", - "value": [ - "10200" - ] - } - ], - "target-connection-group-id": "line1" - } - ] - } - } - ] - } - } - ] - } -} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json deleted file mode 100644 index 690a84d91..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slice1_put_ietf_network_slice.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "connectivity-construct": [ - { - "id": 1, - "p2p-receiver-sdp": "2", - "p2p-sender-sdp": "1", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "bound": 10, - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds" - }, - { - "bound": 5000, - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": 0.001 - } - ] - } - } - }, - { - "id": 2, - "p2p-receiver-sdp": "1", - "p2p-sender-sdp": "2", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "bound": 20, - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds" - }, - { - "bound": 1000, - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": 0.001 - } - ] - } - } - } - ], - "connectivity-type": "point-to-point", - "id": "line1" - } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json similarity index 100% rename from src/tests/ecoc25-f5ga-telemetry/data/topology-agg.json rename to src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json similarity index 100% rename from 
src/tests/ecoc25-f5ga-telemetry/data/topology-e2e.json rename to src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json similarity index 100% rename from src/tests/ecoc25-f5ga-telemetry/data/topology-ip.json rename to src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json -- GitLab From 1f001fe2a21c8a6334e4327c43a986e65fe40b86 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 11:17:10 +0000 Subject: [PATCH 070/367] Tests - Tools - Mock NCE-T Controller - Properly parse escaped URLs --- src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py index eb92e012d..9df57528f 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py @@ -14,6 +14,7 @@ import json, libyang, logging +import urllib.parse from typing import Dict, List, Optional, Set @@ -152,7 +153,9 @@ class YangHandler: list name as the key name. """ - parts = [p for p in path.strip('/').split('/') if p != ''] + # URL-decode each path segment so escaped characters like `%22` + # (double quotes) are properly handled when parsing list keys. + parts = [urllib.parse.unquote(p) for p in path.strip('/').split('/') if p != ''] schema_path = '' out_parts: List[str] = [] -- GitLab From ae0c6473d71b712f9f09f6f2a99404d659a8a7d3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 11:49:34 +0000 Subject: [PATCH 071/367] Device component - ACTN Driver: - Implemented link extraction - Corrected retrieval URL - Code polish --- .../handlers/NetworkTopologyHandler.py | 107 ++++++------------ 1 file changed, 32 insertions(+), 75 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index 7c569d8bd..7df7f00b6 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -12,59 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License.
-import logging, requests -from typing import Dict, List, Optional, Tuple, Union +import logging +from typing import Dict, List, Optional from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( DEVICEDRIVER_UNDEFINED, DEVICEOPERATIONALSTATUS_DISABLED, - DEVICEOPERATIONALSTATUS_ENABLED, DeviceOperationalStatusEnum + DEVICEOPERATIONALSTATUS_ENABLED ) -from common.tools.client.RestApiClient import RestApiClient from common.tools.client.RestConfClient import RestConfClient from device.service.driver_api.ImportTopologyEnum import ( ImportTopologyEnum, get_import_topology ) -from .RestApiClient import ( - HTTP_STATUS_CREATED, HTTP_STATUS_NO_CONTENT, HTTP_STATUS_OK, - RestApiClient -) - - -GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' -GET_DEVICES_URL = '/tfs-api/devices' -GET_LINKS_URL = '/tfs-api/links' -L3VPN_URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services' - - -MAPPING_STATUS = { - 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, - 'DEVICEOPERATIONALSTATUS_DISABLED' : 1, - 'DEVICEOPERATIONALSTATUS_ENABLED' : 2, -} - - -MAPPING_DRIVER = { - 'DEVICEDRIVER_UNDEFINED' : 0, - 'DEVICEDRIVER_OPENCONFIG' : 1, - 'DEVICEDRIVER_TRANSPORT_API' : 2, - 'DEVICEDRIVER_P4' : 3, - 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, - 'DEVICEDRIVER_ONF_TR_532' : 5, - 'DEVICEDRIVER_XR' : 6, - 'DEVICEDRIVER_IETF_L2VPN' : 7, - 'DEVICEDRIVER_GNMI_OPENCONFIG' : 8, - 'DEVICEDRIVER_OPTICAL_TFS' : 9, - 'DEVICEDRIVER_IETF_ACTN' : 10, - 'DEVICEDRIVER_OC' : 11, - 'DEVICEDRIVER_QKD' : 12, - 'DEVICEDRIVER_IETF_L3VPN' : 13, - 'DEVICEDRIVER_IETF_SLICE' : 14, - 'DEVICEDRIVER_NCE' : 15, - 'DEVICEDRIVER_SMARTNIC' : 16, - 'DEVICEDRIVER_MORPHEUS' : 17, - 'DEVICEDRIVER_RYU' : 18, -} LOGGER = logging.getLogger(__name__) @@ -75,7 +34,7 @@ class NetworkTopologyHandler: self._rest_conf_client = rest_conf_client self._object_name = 'NetworkTopology' self._subpath_root = '/ietf-network:networks' - self._subpath_item = self._subpath_root + '/network="{network_id:s}"' + self._subpath_item = self._subpath_root + '/network={network_id:s}' # Options are: # disabled --> just import endpoints as usual @@ -89,22 +48,16 @@ class NetworkTopologyHandler: def get(self, network_id : Optional[str] = None) -> List[Dict]: if network_id is None: network_id = DEFAULT_TOPOLOGY_NAME endpoint = self._subpath_item.format(network_id=network_id) - networks = self._rest_conf_client.get(endpoint) + reply = self._rest_conf_client.get(endpoint) - if 'ietf-network:networks' not in networks: - raise Exception('Malformed reply. "ietf-network:networks" missing') - networks = networks['ietf-network:networks'] + if 'ietf-network:network' not in reply: + raise Exception('Malformed reply. 
"ietf-network:network" missing') + network = reply['ietf-network:network'] - if 'network' not in networks: return list() - networks = networks['network'] - if len(networks) == 0: return list() - - network = next(iter([ - n for n in networks if n['network-id'] == network_id - ]), default=None) - - if network is None: - raise Exception('Network({:s}) not found'.format(str(network_id))) + if len(network) == 0: + MSG = '[get] Network({:s}) not found; returning' + LOGGER.debug(MSG.format(str(network_id))) + return list() MSG = '[get] import_topology={:s}' LOGGER.debug(MSG.format(str(self._import_topology))) @@ -178,22 +131,26 @@ class NetworkTopologyHandler: LOGGER.debug('[get] devices only; returning') return result -# for json_link in links['links']: -# link_uuid : str = json_link['link_id']['link_uuid']['uuid'] -# link_url = '/links/link[{:s}]'.format(link_uuid) -# link_endpoint_ids = [ -# ( -# json_endpoint_id['device_id']['device_uuid']['uuid'], -# json_endpoint_id['endpoint_uuid']['uuid'], -# ) -# for json_endpoint_id in json_link['link_endpoint_ids'] -# ] -# link_data = { -# 'uuid': json_link['link_id']['link_uuid']['uuid'], -# 'name': json_link['name'], -# 'endpoints': link_endpoint_ids, -# } -# result.append((link_url, link_data)) + for link in network['ietf-network-topology:link']: + link_uuid = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_dev_id = link_src['source-node'] + link_src_ep_id = link_src['source-tp'] + link_dst_dev_id = link_dst['dest-node'] + link_dst_ep_id = link_dst['dest-tp'] + + link_url = '/links/link[{:s}]'.format(link_uuid) + link_endpoint_ids = [ + (link_src_dev_id, link_src_ep_id), + (link_dst_dev_id, link_dst_ep_id), + ] + link_data = { + 'uuid': link_uuid, + 'name': link_uuid, + 'endpoints': link_endpoint_ids, + } + result.append((link_url, link_data)) LOGGER.debug('[get] topology; returning') return result -- GitLab From 72a15be4daaae707735643ee24b6101b2aa6e107 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 11:49:53 +0000 Subject: [PATCH 072/367] ECOC F5GA Telemetry Demo: - Data file cleanup --- ...st_connection_group_to_network_slice1.json | 62 ------ ...post_match_criteria_to_sdp1_in_slice1.json | 40 ---- .../data/slice1/post_network_slice1.json | 188 ------------------ .../slice1/post_sdp_to_network_slice1.json | 61 ------ .../data/slices/network_slice1.json | 118 +++++++++++ .../data/slices/network_slice2.json | 118 +++++++++++ 6 files changed, 236 insertions(+), 351 deletions(-) delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice2.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json deleted file mode 100644 index d39a837bd..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_connection_group_to_network_slice1.json +++ /dev/null @@ -1,62 +0,0 @@ -{ - "connection-group": [ - { - "id": "line2", - "connectivity-type": "point-to-point", - 
"connectivity-construct": [ - { - "id": 1, - "p2p-sender-sdp": "1", - "p2p-receiver-sdp": "3", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds", - "bound": "10" - }, - { - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps", - "bound": "5000" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": "0.001" - } - ] - } - } - }, - { - "id": 2, - "p2p-sender-sdp": "3", - "p2p-receiver-sdp": "1", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds", - "bound": "20" - }, - { - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps", - "bound": "1000" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": "0.001" - } - ] - } - } - } - ] - } - ] -} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json deleted file mode 100644 index 16a36d45b..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_match_criteria_to_sdp1_in_slice1.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "match-criterion": [ - { - "index": 2, - "match-type": [ - { - "type": "ietf-network-slice-service:vlan", - "value": [ - "101" - ] - }, - { - "type": "ietf-network-slice-service:source-ip-prefix", - "value": [ - "172.1.101.22/24" - ] - }, - { - "type": "ietf-network-slice-service:source-tcp-port", - "value": [ - "10200" - ] - }, - { - "type": "ietf-network-slice-service:destination-ip-prefix", - "value": [ - "172.16.104.222/24" - ] - }, - { - "type": "ietf-network-slice-service:destination-tcp-port", - "value": [ - "10500" - ] - } - ], - "target-connection-group-id": "line2" - } - ] -} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json deleted file mode 100644 index e6e0ee90a..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_network_slice1.json +++ /dev/null @@ -1,188 +0,0 @@ -{ - "slice-service": [ - { - "id": "slice1", - "description": "network slice 1, connect to VM1", - "sdps": { - "sdp": [ - { - "id": "1", - "node-id": "172.16.204.220", - "sdp-ip-address": [ - "172.16.204.220" - ], - "service-match-criteria": { - "match-criterion": [ - { - "index": 1, - "match-type": [ - { - "type": "ietf-network-slice-service:vlan", - "value": [ - "101" - ] - }, - { - "type": "ietf-network-slice-service:destination-ip-prefix", - "value": [ - "172.16.104.221/24" - ] - }, - { - "type": "ietf-network-slice-service:destination-tcp-port", - "value": [ - "10500" - ] - }, - { - "type": "ietf-network-slice-service:source-ip-prefix", - "value": [ - "172.1.101.22/24" - ] - }, - { - "type": "ietf-network-slice-service:source-tcp-port", - "value": [ - "10200" - ] - } - ], - "target-connection-group-id": "line1" - } - ] - }, - "attachment-circuits": { - "attachment-circuit": [ - { - "id": "AC POP to VM1", - "description": "AC VM1 connected to POP", - "ac-node-id": "172.16.204.220", - "ac-tp-id": "200" - } - ] - } - }, - { - "id": "2", - "node-id": "172.16.61.10", - "sdp-ip-address": [ - 
"172.16.61.10" - ], - "service-match-criteria": { - "match-criterion": [ - { - "index": 1, - "match-type": [ - { - "type": "ietf-network-slice-service:vlan", - "value": [ - "21" - ] - }, - { - "type": "ietf-network-slice-service:source-ip-prefix", - "value": [ - "172.16.104.221/24" - ] - }, - { - "type": "ietf-network-slice-service:source-tcp-port", - "value": [ - "10500" - ] - }, - { - "type": "ietf-network-slice-service:destination-ip-prefix", - "value": [ - "172.1.101.22/24" - ] - }, - { - "type": "ietf-network-slice-service:destination-tcp-port", - "value": [ - "10200" - ] - } - ], - "target-connection-group-id": "line1" - } - ] - }, - "attachment-circuits": { - "attachment-circuit": [ - { - "id": "AC ONT", - "description": "AC connected to PC1", - "ac-node-id": "172.16.61.10", - "ac-tp-id": "200" - } - ] - } - } - ] - }, - "connection-groups": { - "connection-group": [ - { - "id": "line1", - "connectivity-type": "point-to-point", - "connectivity-construct": [ - { - "id": 1, - "p2p-sender-sdp": "1", - "p2p-receiver-sdp": "2", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds", - "bound": "10" - }, - { - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps", - "bound": "5000" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": "0.001" - } - ] - } - } - }, - { - "id": 2, - "p2p-sender-sdp": "2", - "p2p-receiver-sdp": "1", - "service-slo-sle-policy": { - "slo-policy": { - "metric-bound": [ - { - "metric-type": "ietf-network-slice-service:one-way-delay-maximum", - "metric-unit": "milliseconds", - "bound": "20" - }, - { - "metric-type": "ietf-network-slice-service:one-way-bandwidth", - "metric-unit": "Mbps", - "bound": "1000" - }, - { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", - "percentile-value": "0.001" - } - ] - } - } - } - ] - } - ] - } - } - ] -} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json deleted file mode 100644 index bd3895fc4..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slice1/post_sdp_to_network_slice1.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "sdp": [ - { - "id": "3", - "node-id": "172.16.61.11", - "sdp-ip-address": [ - "172.16.61.11" - ], - "service-match-criteria": { - "match-criterion": [ - { - "index": 1, - "match-type": [ - { - "type": "ietf-network-slice-service:vlan", - "value": [ - "21" - ] - }, - { - "type": "ietf-network-slice-service:source-ip-prefix", - "value": [ - "172.16.104.222/24" - ] - }, - { - "type": "ietf-network-slice-service:source-tcp-port", - "value": [ - "10500" - ] - }, - { - "type": "ietf-network-slice-service:destination-ip-prefix", - "value": [ - "172.1.101.22/24" - ] - }, - { - "type": "ietf-network-slice-service:destination-tcp-port", - "value": [ - "10200" - ] - } - ], - "target-connection-group-id": "line2" - } - ] - }, - "attachment-circuits": { - "attachment-circuit": [ - { - "id": "AC ONT", - "description": "AC connected to PC2", - "ac-node-id": "172.16.61.11", - "ac-tp-id": "200" - } - ] - } - } - ] -} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice1.json new file mode 100644 index 
000000000..3bb285dfb --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice1.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "slice1", + "description": "network slice 1, PC1-VM1", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "172.16.61.10", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT", + "description": "AC ONT connected to PC1", + "ac-node-id": "172.16.61.10", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "172.16.204.221", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "172.16.204.221", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice2.json new file mode 100644 index 000000000..3bb285dfb --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice2.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "slice1", + "description": "network slice 1, PC1-VM1", + "sdps": { + "sdp": [ + { + "id": "1", + 
"node-id": "172.16.61.10", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT", + "description": "AC ONT connected to PC1", + "ac-node-id": "172.16.61.10", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "172.16.204.221", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "172.16.204.221", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file -- GitLab From bb63515bb89872ac1bd3956456cce5b01680c545 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 12:00:34 +0000 Subject: [PATCH 073/367] Device component - ACTN Driver: - Added missing log messages - Corrected parsing of network topology --- .../service/drivers/ietf_actn/IetfActnDriver.py | 2 ++ .../ietf_actn/handlers/NetworkTopologyHandler.py | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index 42b02ca40..b3850db36 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ 
b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -109,6 +109,8 @@ class IetfActnDriver(_Driver): results.extend(_results) except Exception as e: + MSG = 'Error processing resource_key: {:s}' + LOGGER.exception(MSG.format(str(resource_key))) results.append((resource_key, e)) return results diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index 7df7f00b6..73a5b8bdc 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -52,13 +52,20 @@ class NetworkTopologyHandler: if 'ietf-network:network' not in reply: raise Exception('Malformed reply. "ietf-network:network" missing') - network = reply['ietf-network:network'] + networks = reply['ietf-network:network'] - if len(network) == 0: + if len(networks) == 0: MSG = '[get] Network({:s}) not found; returning' LOGGER.debug(MSG.format(str(network_id))) return list() + if len(networks) > 1: + MSG = '[get] Multiple occurrences for Network({:s}); returning' + LOGGER.debug(MSG.format(str(network_id))) + return list() + + network = networks[0] + MSG = '[get] import_topology={:s}' LOGGER.debug(MSG.format(str(self._import_topology))) -- GitLab From 6e4b217d07a438c0269965f91dc91ea42ee0207c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:10:40 +0000 Subject: [PATCH 074/367] Device component - ACTN Driver: - Migrated RestApiClient to RestConfClient - Multiple code polishing and alignment --- .../drivers/ietf_actn/IetfActnDriver.py | 28 ++++++------ .../ietf_actn/handlers/EthtServiceHandler.py | 43 +++++++------------ .../handlers/NetworkTopologyHandler.py | 1 - .../ietf_actn/handlers/OsuTunnelHandler.py | 40 +++++++---------- 4 files changed, 43 insertions(+), 69 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index b3850db36..b15cb3f84 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -21,19 +21,22 @@ from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOU from .handlers.EthtServiceHandler import EthtServiceHandler from .handlers.OsuTunnelHandler import OsuTunnelHandler from .handlers.NetworkTopologyHandler import NetworkTopologyHandler -from .handlers.RestApiClient import RestApiClient from .Tools import get_etht_services, get_osu_tunnels, parse_resource_key + LOGGER = logging.getLogger(__name__) + ALL_RESOURCE_KEYS = [ RESOURCE_ENDPOINTS, RESOURCE_SERVICES, ] + DRIVER_NAME = 'ietf_actn' METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + class IetfActnDriver(_Driver): def __init__(self, address: str, port: int, **settings) -> None: super().__init__(DRIVER_NAME, address, port, **settings) @@ -41,23 +44,20 @@ class IetfActnDriver(_Driver): self.__started = threading.Event() self.__terminate = threading.Event() - self._rest_api_client = RestApiClient(address, port, settings=settings) - restconf_settings = copy.deepcopy(settings) restconf_settings.pop('base_url', None) restconf_settings.pop('import_topology', None) restconf_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient') self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) - - self._handler_etht_service = EthtServiceHandler(self._rest_api_client) + self._handler_etht_service = 
EthtServiceHandler(self._rest_conf_client) self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_client, **settings) - self._handler_osu_tunnel = OsuTunnelHandler(self._rest_api_client) + self._handler_osu_tunnel = OsuTunnelHandler(self._rest_conf_client) def Connect(self) -> bool: with self.__lock: if self.__started.is_set(): return True try: - self._rest_api_client.get('Check Credentials', '') + self._rest_conf_client._discover_base_url() except requests.exceptions.Timeout: LOGGER.exception('Timeout exception checking connectivity') return False @@ -81,15 +81,13 @@ class IetfActnDriver(_Driver): @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) - results = [] + results = list() with self.__lock: if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS for i, resource_key in enumerate(resource_keys): chk_string('resource_key[#{:d}]'.format(i), resource_key, allow_empty=False) try: - _results = list() - if resource_key == RESOURCE_ENDPOINTS: # Add mgmt endpoint by default #resource_key = '/endpoints/endpoint[mgmt]' @@ -97,17 +95,15 @@ class IetfActnDriver(_Driver): #results.append((resource_key, resource_value)) results.extend(self._handler_net_topology.get()) elif resource_key == RESOURCE_SERVICES: - get_osu_tunnels(self._handler_osu_tunnel, _results) - get_etht_services(self._handler_etht_service, _results) + get_osu_tunnels(self._handler_osu_tunnel, results) + get_etht_services(self._handler_etht_service, results) else: # check if resource key is for a specific OSU tunnel or ETHT service, and get them accordingly osu_tunnel_name, etht_service_name = parse_resource_key(resource_key) if osu_tunnel_name is not None: - get_osu_tunnels(self._handler_osu_tunnel, _results, osu_tunnel_name=osu_tunnel_name) + get_osu_tunnels(self._handler_osu_tunnel, results, osu_tunnel_name=osu_tunnel_name) if etht_service_name is not None: - get_etht_services(self._handler_etht_service, _results, etht_service_name=etht_service_name) - - results.extend(_results) + get_etht_services(self._handler_etht_service, results, etht_service_name=etht_service_name) except Exception as e: MSG = 'Error processing resource_key: {:s}' LOGGER.exception(MSG.format(str(resource_key))) diff --git a/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py b/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py index 6d8ec2f8c..fffde6cde 100644 --- a/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py @@ -14,10 +14,12 @@ import enum, logging from typing import Dict, List, Optional, Tuple, Union -from .RestApiClient import HTTP_STATUS_CREATED, HTTP_STATUS_NO_CONTENT, HTTP_STATUS_OK, RestApiClient +from common.tools.client.RestConfClient import RestConfClient + LOGGER = logging.getLogger(__name__) + class BandwidthProfileTypeEnum(enum.Enum): MEF_10_BWP = 'ietf-eth-tran-types:mef-10-bwp' @@ -106,36 +108,20 @@ def compose_etht_service( 'optimizations': compose_optimizations(), }]}} + class EthtServiceHandler: - def __init__(self, rest_api_client : RestApiClient) -> None: - self._rest_api_client = rest_api_client - self._object_name = 'EthtService' + def __init__(self, rest_conf_client : RestConfClient) -> None: + self._rest_conf_client = rest_conf_client self._subpath_root = '/ietf-eth-tran-service:etht-svc' - self._subpath_item = self._subpath_root + 
'/etht-svc-instances="{etht_service_name:s}"' + self._subpath_item = self._subpath_root + '/etht-svc-instances={etht_service_name:s}' - def _rest_api_get(self, etht_service_name : Optional[str] = None) -> Union[Dict, List]: + + def get(self, etht_service_name : Optional[str] = None) -> Union[Dict, List]: if etht_service_name is None: subpath_url = self._subpath_root else: subpath_url = self._subpath_item.format(etht_service_name=etht_service_name) - return self._rest_api_client.get( - self._object_name, subpath_url, expected_http_status={HTTP_STATUS_OK} - ) - - def _rest_api_update(self, data : Dict) -> bool: - return self._rest_api_client.update( - self._object_name, self._subpath_root, data, expected_http_status={HTTP_STATUS_CREATED} - ) - - def _rest_api_delete(self, etht_service_name : str) -> bool: - if etht_service_name is None: raise Exception('etht_service_name is None') - subpath_url = self._subpath_item.format(etht_service_name=etht_service_name) - return self._rest_api_client.delete( - self._object_name, subpath_url, expected_http_status={HTTP_STATUS_NO_CONTENT} - ) - - def get(self, etht_service_name : Optional[str] = None) -> Union[Dict, List]: - data = self._rest_api_get(etht_service_name=etht_service_name) + data = self._rest_conf_client.get(subpath_url) if not isinstance(data, dict): raise ValueError('data should be a dict') if 'ietf-eth-tran-service:etht-svc' not in data: @@ -192,6 +178,7 @@ class EthtServiceHandler: return etht_services + def update(self, parameters : Dict) -> bool: name = parameters['name' ] service_type = parameters['service_type' ] @@ -214,8 +201,10 @@ class EthtServiceHandler: src_node_id, src_tp_id, src_vlan_tag, dst_node_id, dst_tp_id, dst_vlan_tag, src_static_routes=src_static_routes, dst_static_routes=dst_static_routes ) + return self._rest_conf_client.post(self._subpath_root, body=data) is not None - return self._rest_api_update(data) - def delete(self, etht_service_name : str) -> bool: - return self._rest_api_delete(etht_service_name) + def delete(self, etht_service_name : str) -> None: + if etht_service_name is None: raise Exception('etht_service_name is None') + subpath_url = self._subpath_item.format(etht_service_name=etht_service_name) + return self._rest_conf_client.delete(subpath_url) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index 73a5b8bdc..d03f00830 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -32,7 +32,6 @@ LOGGER = logging.getLogger(__name__) class NetworkTopologyHandler: def __init__(self, rest_conf_client : RestConfClient, **settings) -> None: self._rest_conf_client = rest_conf_client - self._object_name = 'NetworkTopology' self._subpath_root = '/ietf-network:networks' self._subpath_item = self._subpath_root + '/network={network_id:s}' diff --git a/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py b/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py index d94e7d7c4..de23f1e34 100644 --- a/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py @@ -14,10 +14,12 @@ import enum, logging from typing import Dict, List, Optional, Union -from .RestApiClient import HTTP_STATUS_CREATED, HTTP_STATUS_NO_CONTENT, HTTP_STATUS_OK, RestApiClient +from common.tools.client.RestConfClient import 
RestConfClient + LOGGER = logging.getLogger(__name__) + class EndpointProtectionRoleEnum(enum.Enum): WORK = 'work' @@ -80,36 +82,21 @@ def compose_osu_tunnel( 'protection': compose_osu_tunnel_protection(), }]} + class OsuTunnelHandler: - def __init__(self, rest_api_client : RestApiClient) -> None: - self._rest_api_client = rest_api_client - self._object_name = 'OsuTunnel' + def __init__(self, rest_conf_client : RestConfClient) -> None: + self._rest_conf_client = rest_conf_client self._subpath_root = '/ietf-te:te/tunnels' - self._subpath_item = self._subpath_root + '/tunnel="{osu_tunnel_name:s}"' + self._subpath_item = self._subpath_root + '/tunnel={osu_tunnel_name:s}' + - def _rest_api_get(self, osu_tunnel_name : Optional[str] = None) -> Union[Dict, List]: + def get(self, osu_tunnel_name : Optional[str] = None) -> Union[Dict, List]: if osu_tunnel_name is None: subpath_url = self._subpath_root else: subpath_url = self._subpath_item.format(osu_tunnel_name=osu_tunnel_name) - return self._rest_api_client.get( - self._object_name, subpath_url, expected_http_status={HTTP_STATUS_OK} - ) - - def _rest_api_update(self, data : Dict) -> bool: - return self._rest_api_client.update( - self._object_name, self._subpath_root, data, expected_http_status={HTTP_STATUS_CREATED} - ) - def _rest_api_delete(self, osu_tunnel_name : str) -> bool: - if osu_tunnel_name is None: raise Exception('osu_tunnel_name is None') - subpath_url = self._subpath_item.format(osu_tunnel_name=osu_tunnel_name) - return self._rest_api_client.delete( - self._object_name, subpath_url, expected_http_status={HTTP_STATUS_NO_CONTENT} - ) - - def get(self, osu_tunnel_name : Optional[str] = None) -> Union[Dict, List]: - data = self._rest_api_get(osu_tunnel_name=osu_tunnel_name) + data = self._rest_conf_client.get(subpath_url) if not isinstance(data, dict): raise ValueError('data should be a dict') if 'ietf-te:tunnel' not in data: raise ValueError('data does not contain key "ietf-te:tunnel"') @@ -147,6 +134,7 @@ class OsuTunnelHandler: return osu_tunnels + def update(self, parameters : Dict) -> bool: name = parameters['name' ] @@ -169,8 +157,10 @@ class OsuTunnelHandler: name, src_node_id, src_tp_id, src_ttp_channel_name, dst_node_id, dst_tp_id, dst_ttp_channel_name, odu_type, osuflex_number, delay, bidirectional=bidirectional ) + return self._rest_conf_client.post(self._subpath_root, body=data) is not None - return self._rest_api_update(data) def delete(self, osu_tunnel_name : str) -> bool: - return self._rest_api_delete(osu_tunnel_name) + if osu_tunnel_name is None: raise Exception('osu_tunnel_name is None') + subpath_url = self._subpath_item.format(osu_tunnel_name=osu_tunnel_name) + return self._rest_conf_client.delete(subpath_url) -- GitLab From 056f01bf9b3700ed909df6eaf83f763514c04589 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:19:01 +0000 Subject: [PATCH 075/367] Device component - ACTN Driver: - Bug fixing --- .../service/drivers/ietf_actn/handlers/OsuTunnelHandler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py b/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py index de23f1e34..a43bac28c 100644 --- a/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py @@ -99,7 +99,7 @@ class OsuTunnelHandler: data = self._rest_conf_client.get(subpath_url) if not isinstance(data, dict): raise ValueError('data should be a dict') - if 
'ietf-te:tunnel' not in data: raise ValueError('data does not contain key "ietf-te:tunnel"') + if 'ietf-te:tunnel' not in data: return list() data = data['ietf-te:tunnel'] if not isinstance(data, list): raise ValueError('data[ietf-te:tunnel] should be a list') -- GitLab From 4c83196e7a65e6eff7b84dd8ccf532827c204e78 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:30:48 +0000 Subject: [PATCH 076/367] Device component - ACTN Driver: - Bug fixing --- .../service/drivers/ietf_actn/handlers/EthtServiceHandler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py b/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py index fffde6cde..7c9e4b89c 100644 --- a/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py @@ -121,11 +121,11 @@ class EthtServiceHandler: subpath_url = self._subpath_root else: subpath_url = self._subpath_item.format(etht_service_name=etht_service_name) + data = self._rest_conf_client.get(subpath_url) if not isinstance(data, dict): raise ValueError('data should be a dict') - if 'ietf-eth-tran-service:etht-svc' not in data: - raise ValueError('data does not contain key "ietf-eth-tran-service:etht-svc"') + if 'ietf-eth-tran-service:etht-svc' not in data: return list() data = data['ietf-eth-tran-service:etht-svc'] if 'etht-svc-instances' not in data: raise ValueError('data["ietf-eth-tran-service:etht-svc"] does not contain key "etht-svc-instances"') -- GitLab From 2b82f7864eb5522a003003c630b31a9a4ba0970c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:31:57 +0000 Subject: [PATCH 077/367] Tests - Tools - Mock NCE-T Controller - Added tode to inject NCE-T topology to SIMAP --- .../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 42 +++ .../nce_t_ctrl/simap_client/RestConfClient.py | 191 ++++++++++++++ .../nce_t_ctrl/simap_client/SimapClient.py | 242 ++++++++++++++++++ .../nce_t_ctrl/simap_client/__init__.py | 14 + 4 files changed, 489 insertions(+) create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index d839d2179..d32a85bbc 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -20,6 +20,8 @@ from .Dispatch import RestConfDispatch from .HostMeta import HostMeta from .YangHandler import YangHandler from .YangModelDiscoverer import YangModuleDiscoverer +from .simap_client.RestConfClient import RestConfClient +from .simap_client.SimapClient import SimapClient logging.basicConfig( @@ -27,6 +29,7 @@ logging.basicConfig( format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", ) LOGGER = logging.getLogger(__name__) +logging.getLogger('RestConfClient').setLevel(logging.WARN) RESTCONF_PREFIX = '/restconf' @@ -43,6 +46,45 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: YANG_STARTUP_DATA = json.loads(fp.read()) +restconf_client = RestConfClient( + 'simap-client', port=8080, + logger=logging.getLogger('RestConfClient') +) +simap_client = SimapClient(restconf_client) + +te_topo = simap_client.network('admin') +te_topo.update() + +networks = 
YANG_STARTUP_DATA.get('ietf-network:networks', dict()) +networks = networks.get('network', list()) +assert len(networks) == 1 +network = networks[0] +assert network['network-id'] == 'admin' + +nodes = network.get('node', list()) +for node in nodes: + node_id = node['node-id'] + tp_ids = [ + tp['tp-id'] + for tp in node['ietf-network-topology:termination-point'] + ] + te_topo.node(node_id).create(termination_point_ids=tp_ids) + +links = network.get('ietf-network-topology:link', list()) +for link in links: + link_id = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_node_id = link_src['source-node'] + link_src_tp_id = link_src['source-tp'] + link_dst_node_id = link_dst['dest-node'] + link_dst_tp_id = link_dst['dest-tp'] + + te_topo.link(link_id).create( + link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id + ) + + yang_handler = YangHandler( YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py new file mode 100644 index 000000000..b7c057a70 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py @@ -0,0 +1,191 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
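# ---------------------------------------------------------------------------
# Editorial sketch (illustrative, not part of the upstream patch): the client
# defined in this file resolves its RESTCONF root through the RFC 8040
# "/.well-known/host-meta" document before issuing any data request. Assuming
# the mock SIMAP server replies with
#     {"links": [{"rel": "restconf", "href": "/restconf"}]}
# the base URL is derived the same way _discover_base_url() does below:
#
#     host_meta = {'links': [{'rel': 'restconf', 'href': '/restconf'}]}
#     href      = host_meta['links'][0]['href']        # -> '/restconf'
#     base_url  = (href + '/data').replace('//', '/')  # -> '/restconf/data'
#
# Relative endpoints passed to get()/post()/put()/patch()/delete() are then
# appended to this base URL; endpoints that are already absolute http(s) URLs
# are requested verbatim.
# ---------------------------------------------------------------------------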
+ +import enum, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Any, Dict, Optional, Set + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], # 200 - OK + requests.codes['CREATED' ], # 201 - Created + requests.codes['ACCEPTED' ], # 202 - Accepted + requests.codes['NO_CONTENT'], # 204 - No Content +} + +def compose_basic_auth( + username : Optional[str] = None, password : Optional[str] = None +) -> Optional[HTTPBasicAuth]: + if username is None or password is None: return None + return HTTPBasicAuth(username, password) + +class SchemeEnum(enum.Enum): + HTTP = 'http' + HTTPS = 'https' + +def check_scheme(scheme : str) -> str: + str_scheme = str(scheme).lower() + enm_scheme = SchemeEnum._value2member_map_[str_scheme] + return enm_scheme.value + +HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' +RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' + +class RestConfClient: + def __init__( + self, address : str, port : int = 8080, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, + logger : Optional[logging.Logger] = None + ) -> None: + self._address = address + self._port = int(port) + self._scheme = check_scheme(scheme) + self._auth = compose_basic_auth(username=username, password=password) + self._base_url = '' + self._timeout = int(timeout) + self._verify_certs = verify_certs + self._allow_redirects = allow_redirects + self._logger = logger + + self._discover_base_url() + + def _discover_base_url(self) -> None: + host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) + host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) + + links = host_meta.get('links') + if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') + if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') + if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') + + link = links[0] + if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') + + rel = link.get('rel') + if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') + if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') + if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') + + href = link.get('href') + if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') + if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') + + self._base_url = str(href + '/data').replace('//', '/') + + def _log_msg_request( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + log_level : int = logging.INFO + ) -> str: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + if self._logger is not None: self._logger.log(log_level, msg) + return msg + + def _log_msg_check_reply( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + reply : requests.Response, expected_status_codes : Set[int], + log_level : int = logging.INFO + ) -> str: + msg = 'Reply: {:s}'.format(str(reply.text)) + if self._logger is not None: 
self._logger.log(log_level, msg) + http_status_code = reply.status_code + if http_status_code in expected_status_codes: return msg + MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' + msg = MSG.format( + str(method.value).upper(), str(request_url), str(body), + str(http_status_code), str(reply.text) + ) + self._logger.error(msg) + raise Exception(msg) + + def _do_rest_request( + self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = EXPECTED_STATUS_CODES + ) -> Optional[Any]: + candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) + if endpoint.lower().startswith(candidate_schemes): + request_url = endpoint.lstrip('/') + else: + endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') + request_url = '{:s}://{:s}:{:d}/{:s}'.format( + self._scheme, self._address, self._port, endpoint.lstrip('/') + ) + self._log_msg_request(method, request_url, body) + try: + headers = {'accept': 'application/json'} + reply = requests.request( + method.value, request_url, headers=headers, json=body, + auth=self._auth, verify=self._verify_certs, timeout=self._timeout, + allow_redirects=self._allow_redirects + ) + except Exception as e: + MSG = 'Request failed. method={:s} url={:s} body={:s}' + msg = MSG.format(str(method.value).upper(), request_url, str(body)) + self._logger.exception(msg) + raise Exception(msg) from e + self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) + if reply.content and len(reply.content) > 0: return reply.json() + return None + + def get( + self, endpoint : str, + expected_status_codes : Set[int] = {requests.codes['OK']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.GET, endpoint, + expected_status_codes=expected_status_codes + ) + + def post( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.POST, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def put( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PUT, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def patch( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PATCH, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def delete( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.DELETE, endpoint, body=body, + expected_status_codes=expected_status_codes + ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py new file mode 100644 index 000000000..b4c27d43a --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py @@ -0,0 +1,242 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Dict, List, Tuple +from .RestConfClient import RestConfClient + + +class TerminationPoint: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tp_id = tp_id + + def create(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network-topology:termination-point'][0] + + def update(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + self._restconf_client.delete(endpoint) + +class Node: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tps : Dict[str, TerminationPoint] = dict() + + def termination_points(self) -> List[Dict]: + tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) + return tps['ietf-network-topology:termination-point'].get('termination-point', list()) + + def termination_point(self, tp_id : str) -> TerminationPoint: + _tp = self._tps.get(tp_id) + if _tp is not None: return _tp + _tp = TerminationPoint(self._restconf_client, self._network_id, self._node_id, 
tp_id) + return self._tps.setdefault(tp_id, _tp) + + def create( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network:node'][0] + + def update( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + +class Link: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link : Dict = self._restconf_client.get(endpoint) + return link['ietf-network-topology:link'][0] + + def update( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in 
supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + +class Network: + ENDPOINT_NO_ID = '/ietf-network:networks' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._nodes : Dict[str, Node] = dict() + self._links : Dict[str, Link] = dict() + + def nodes(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Node.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('node', list()) + + def links(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Link.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('ietf-network-topology:link', list()) + + def node(self, node_id : str) -> Node: + _node = self._nodes.get(node_id) + if _node is not None: return _node + _node = Node(self._restconf_client, self._network_id, node_id) + return self._nodes.setdefault(node_id, _node) + + def link(self, link_id : str) -> Link: + _link = self._links.get(link_id) + if _link is not None: return _link + _link = Link(self._restconf_client, self._network_id, link_id) + return self._links.setdefault(link_id, _link) + + def create(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + networks : Dict = self._restconf_client.get(endpoint) + return networks['ietf-network:network'][0] + + def update(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + self._restconf_client.delete(endpoint) + + +class SimapClient: + def __init__(self, restconf_client : RestConfClient) -> None: + self._restconf_client = restconf_client + self._networks : Dict[str, Network] = dict() + + def networks(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Network.ENDPOINT_NO_ID) + return reply['ietf-network:networks'].get('network', list()) + + def network(self, network_id : str) -> Network: + _network = self._networks.get(network_id) + if _network is not None: return _network + _network = Network(self._restconf_client, network_id) + return self._networks.setdefault(network_id, _network) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- 
/dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + -- GitLab From 1aec64b5657372957cfec568516ab8e3ca864cb5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:34:24 +0000 Subject: [PATCH 078/367] Tests - Tools - Mock NCE-T Controller - Corrected Dockerfile --- src/tests/tools/mock_nce_t_ctrl/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/tools/mock_nce_t_ctrl/Dockerfile b/src/tests/tools/mock_nce_t_ctrl/Dockerfile index a80b5fe93..3c26ed5af 100644 --- a/src/tests/tools/mock_nce_t_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_t_ctrl/Dockerfile @@ -52,6 +52,7 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory COPY ./yang/. ./yang/ COPY ./nce_t_ctrl/*.py ./nce_t_ctrl/ +COPY ./nce_t_ctrl/simap_client/*.py ./nce_t_ctrl/simap_client/ COPY ./startup.json ./startup.json # Configure Flask for production -- GitLab From 3b51730ea85d10e6d8744900b7e9a89c7999e753 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:35:53 +0000 Subject: [PATCH 079/367] Tests - Tools - Mock NCE-T Controller - Bug fixing --- src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index d32a85bbc..0f895a701 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -47,7 +47,7 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: restconf_client = RestConfClient( - 'simap-client', port=8080, + 'simap-server', port=8080, logger=logging.getLogger('RestConfClient') ) simap_client = SimapClient(restconf_client) -- GitLab From 86fe13f802355e5a2fe2ddfdb623da6e1d6b457c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 13:38:14 +0000 Subject: [PATCH 080/367] Tests - Tools - Mock NCE-T Controller - Bug fixing --- src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 0f895a701..39a9ee3d1 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -47,7 +47,7 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: restconf_client = RestConfClient( - 'simap-server', port=8080, + '172.17.0.1', port=8080, logger=logging.getLogger('RestConfClient') ) simap_client = SimapClient(restconf_client) -- GitLab From 481e7b78ac247dae1f184627da8961beef8f92d5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 14:50:40 +0000 Subject: [PATCH 081/367] Tests - Tools - Mock NCE-T Controller - Bug fixing --- src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 39a9ee3d1..7a87f732a 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -26,7 +26,7 @@ from .simap_client.SimapClient import SimapClient logging.basicConfig( level=logging.INFO, - format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", + format='[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s', ) LOGGER = logging.getLogger(__name__) logging.getLogger('RestConfClient').setLevel(logging.WARN) -- GitLab From 75db0234038f37ece1cd5c4add2c80d7ab6ee36a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 14:51:51 +0000 Subject: [PATCH 082/367] Tests - Tools - Mock NCE-FAN Controller - First implementation of NCE-FAN --- src/tests/tools/mock_nce_fan_ctrl/Dockerfile | 62 + src/tests/tools/mock_nce_fan_ctrl/README.md | 23 + src/tests/tools/mock_nce_fan_ctrl/build.sh | 22 + src/tests/tools/mock_nce_fan_ctrl/deploy.sh | 27 + src/tests/tools/mock_nce_fan_ctrl/destroy.sh | 22 + .../nce_fan_ctrl/Dispatch.py | 148 + .../nce_fan_ctrl/HostMeta.py | 50 + .../nce_fan_ctrl/HttpStatusCodesEnum.py | 27 + .../nce_fan_ctrl/YangHandler.py | 226 + .../nce_fan_ctrl/YangModelDiscoverer.py | 195 + .../nce_fan_ctrl/__init__.py | 14 + .../nce_fan_ctrl/__main__.py | 26 + .../mock_nce_fan_ctrl/nce_fan_ctrl/app.py | 112 + .../simap_client/RestConfClient.py | 191 + .../nce_fan_ctrl/simap_client/SimapClient.py | 242 + .../nce_fan_ctrl/simap_client/__init__.py | 14 + .../tools/mock_nce_fan_ctrl/requirements.in | 25 + .../mock_nce_fan_ctrl/run_ctrl_gunicorn.sh | 20 + .../mock_nce_fan_ctrl/run_ctrl_standalone.sh | 19 + .../tools/mock_nce_fan_ctrl/startup.json | 49 + .../ietf-eth-tran-service.yang | 1010 ++++ .../ietf-eth-tran-types.yang | 460 ++ .../ietf-trans-client-service.yang | 325 ++ .../ietf-trans-client-svc-types.yang | 63 + .../ietf-eth-te-topology.yang | 2278 +++++++++ .../ietf-otn-topology.yang | 2230 ++++++++ .../ietf-te-packet-types.yang | 835 +++ .../ietf-te-types.yang | 4473 +++++++++++++++++ .../ietf-te-device.yang | 595 +++ .../draft-ietf-teas-yang-te-34/ietf-te.yang | 1516 ++++++ .../draft-layer1-types/ietf-layer1-types.yang | 1361 +++++ .../yang/rfc6991/ietf-inet-types.yang | 458 ++ .../yang/rfc6991/ietf-yang-types.yang | 474 ++ .../yang/rfc8294/iana-routing-types.yang | 471 ++ .../yang/rfc8294/ietf-routing-types.yang | 771 +++ .../yang/rfc8343/ietf-interfaces.yang | 1123 +++++ .../yang/rfc8345/ietf-network-topology.yang | 294 ++ .../yang/rfc8345/ietf-network.yang | 192 + .../rfc8346/ietf-l3-unicast-topology.yang | 359 ++ .../yang/rfc8795/ietf-te-topology.yang | 1952 +++++++ .../mock_nce_fan_ctrl/yang/yang-repo-url.txt | 1 + 41 files changed, 22755 insertions(+) create mode 100644 src/tests/tools/mock_nce_fan_ctrl/Dockerfile create mode 100644 src/tests/tools/mock_nce_fan_ctrl/README.md create mode 100755 src/tests/tools/mock_nce_fan_ctrl/build.sh create mode 100755 src/tests/tools/mock_nce_fan_ctrl/deploy.sh create mode 100755 src/tests/tools/mock_nce_fan_ctrl/destroy.sh create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py create mode 100644 
src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__init__.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__main__.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/requirements.in create mode 100755 src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh create mode 100755 src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh create mode 100644 src/tests/tools/mock_nce_fan_ctrl/startup.json create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-inet-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-yang-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/iana-routing-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/ietf-routing-types.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8343/ietf-interfaces.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network-topology.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/rfc8795/ietf-te-topology.yang create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/yang-repo-url.txt diff --git a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile new file mode 100644 index 000000000..a11f21b62 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile @@ -0,0 +1,62 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install git build-essential cmake libpcre2-dev python3-dev python3-cffi && \ + rm -rf /var/lib/apt/lists/* + +# Download, build and install libyang. Note that APT package is outdated +# - Ref: https://github.com/CESNET/libyang +# - Ref: https://github.com/CESNET/libyang-python/ +RUN mkdir -p /var/libyang +RUN git clone https://github.com/CESNET/libyang.git /var/libyang +WORKDIR /var/libyang +RUN git fetch +RUN git checkout v2.1.148 +RUN mkdir -p /var/libyang/build +WORKDIR /var/libyang/build +RUN cmake -D CMAKE_BUILD_TYPE:String="Release" .. +RUN make +RUN make install +RUN ldconfig + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/nce_fan_ctrl/ +WORKDIR /var/teraflow/nce_fan_ctrl/ +COPY ./requirements.in ./requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +COPY ./yang/. ./yang/ +COPY ./nce_fan_ctrl/*.py ./nce_fan_ctrl/ +COPY ./nce_fan_ctrl/simap_client/*.py ./nce_fan_ctrl/simap_client/ +COPY ./startup.json ./startup.json + +# Configure Flask for production +ENV FLASK_ENV=production + +# Start the service +ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nce_fan_ctrl.app:app"] diff --git a/src/tests/tools/mock_nce_fan_ctrl/README.md b/src/tests/tools/mock_nce_fan_ctrl/README.md new file mode 100644 index 000000000..ff0e2dbe1 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/README.md @@ -0,0 +1,23 @@ +# RESTCONF-based NCE-FAN Controller + +This server implements a basic RESTCONF Server that can load, potentially, any YANG data model. +In this case, it is prepared to load a NCE-FAN Controller based on: +- IETF Network Topology +- IETF YANG Data Model for Transport Network Client Signals +- IETF YANG Data Model for Traffic Engineering Tunnels, Label Switched Paths and Interfaces + + +## Build the Docker image +```bash +./build.sh +``` + +## Deploy the Controller +```bash +./deploy.sh +``` + +## Destroy the Controller +```bash +./destroy.sh +``` diff --git a/src/tests/tools/mock_nce_fan_ctrl/build.sh b/src/tests/tools/mock_nce_fan_ctrl/build.sh new file mode 100755 index 000000000..d4d49c98d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/build.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +# Build image for NCE-FAN Controller +docker buildx build -t nce-fan-ctrl:test -f Dockerfile . +#docker tag nce-fan-ctrl:test localhost:32000/tfs/nce-fan-ctrl:test +#docker push localhost:32000/tfs/nce-fan-ctrl:test diff --git a/src/tests/tools/mock_nce_fan_ctrl/deploy.sh b/src/tests/tools/mock_nce_fan_ctrl/deploy.sh new file mode 100755 index 000000000..90eef41c5 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/deploy.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force nce-fan-ctrl + +# Create NCE-FAN Controller +docker run --detach --name nce-fan-ctrl --publish 8080:8080 nce-fan-ctrl:test + +sleep 2 + +# Dump Docker containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/mock_nce_fan_ctrl/destroy.sh b/src/tests/tools/mock_nce_fan_ctrl/destroy.sh new file mode 100755 index 000000000..c6eda7260 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/destroy.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Cleanup +docker rm --force nce-fan-ctrl + +# Dump Docker containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py new file mode 100644 index 000000000..319aa9f7b --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py @@ -0,0 +1,148 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json, logging +from flask import Response, abort, jsonify, request +from flask_restful import Resource +from .HttpStatusCodesEnum import HttpStatusCodesEnum +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class RestConfDispatch(Resource): + def __init__(self, yang_handler : YangHandler) -> None: + super().__init__() + self._yang_handler = yang_handler + + def get(self, subpath : str = '/') -> Response: + data = self._yang_handler.get(subpath) + if data is None: + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, + description='Path({:s}) not found'.format(str(subpath)) + ) + + LOGGER.info('[GET] {:s} => {:s}'.format(subpath, str(data))) + + response = jsonify(json.loads(data)) + response.status_code = HttpStatusCodesEnum.SUCCESS_OK.value + return response + + def post(self, subpath : str) -> Response: + # TODO: client should not provide identifier of element to be created, add it to subpath + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') + + data = self._yang_handler.get(subpath) + if data is not None: + abort( + HttpStatusCodesEnum.CLI_ERR_CONFLICT.value, + description='Path({:s}) already exists'.format(str(subpath)) + ) + + try: + json_data = self._yang_handler.create(subpath, payload) + except Exception as e: + LOGGER.exception('Create failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + + response = jsonify({'status': 'created'}) + response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED.value + return response + + def put(self, subpath : str) -> Response: + # NOTE: client should provide identifier of element to be created/replaced + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') + + try: + json_data = self._yang_handler.update(subpath, payload) + except Exception as e: + LOGGER.exception('Update failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + updated = False # TODO: compute if create or update + + response = jsonify({'status': ( + 'updated' if updated else 'created' + )}) + response.status_code = ( + HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + if updated else + HttpStatusCodesEnum.SUCCESS_CREATED.value + ) + return response + + def patch(self, subpath : str) -> Response: + # NOTE: client should provide identifier of element to be patched + try: + payload = request.get_json(force=True) + except Exception: + LOGGER.exception('Invalid JSON') + abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') + + try: + json_data = self._yang_handler.update(subpath, payload) + except Exception as e: + LOGGER.exception('Update failed') + abort( + 
HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + + response = jsonify({'status': 'patched'}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + return response + + def delete(self, subpath : str) -> Response: + # NOTE: client should provide identifier of element to be patched + + try: + deleted_node = self._yang_handler.delete(subpath) + except Exception as e: + LOGGER.exception('Delete failed') + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, + description=str(e) + ) + + LOGGER.info('[DELETE] {:s} => {:s}'.format(subpath, str(deleted_node))) + + if deleted_node is None: + abort( + HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, + description='Path({:s}) not found'.format(str(subpath)) + ) + + response = jsonify({}) + response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value + return response diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py new file mode 100644 index 000000000..95ef34b19 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py @@ -0,0 +1,50 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import xml.etree.ElementTree as ET +from flask import abort, jsonify, make_response, request +from flask_restful import Resource +from .HttpStatusCodesEnum import HttpStatusCodesEnum + +XRD_NS = 'http://docs.oasis-open.org/ns/xri/xrd-1.0' +ET.register_namespace('', XRD_NS) + +class HostMeta(Resource): + def __init__(self, restconf_prefix : str) -> None: + super().__init__() + self._restconf_prefix = restconf_prefix + + def get(self): + best = request.accept_mimetypes.best_match([ + 'application/xrd+xml', 'application/json' + ], default='application/xrd+xml') + + if best == 'application/xrd+xml': + xrd = ET.Element('{{{:s}}}XRD'.format(str(XRD_NS))) + ET.SubElement(xrd, '{{{:s}}}Link'.format(str(XRD_NS)), attrib={ + 'rel': 'restconf', 'href': self._restconf_prefix + }) + xml_string = ET.tostring(xrd, encoding='utf-8', xml_declaration=True).decode() + response = make_response(str(xml_string)) + response.status_code = 200 + response.content_type = best + return response + elif best == 'application/json': + response = jsonify({'links': [{'rel': 'restconf', 'href': self._restconf_prefix}]}) + response.status_code = 200 + response.content_type = best + return response + else: + abort(HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py new file mode 100644 index 000000000..c44d135c0 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py @@ -0,0 +1,27 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import enum + +class HttpStatusCodesEnum(enum.IntEnum): + SUCCESS_OK = 200 + SUCCESS_CREATED = 201 + SUCCESS_ACCEPTED = 202 + SUCCESS_NO_CONTENT = 204 + CLI_ERR_BAD_REQUEST = 400 + CLI_ERR_NOT_FOUND = 404 + CLI_ERR_NOT_ACCEPTABLE = 406 + CLI_ERR_CONFLICT = 409 + SVR_ERR_NOT_IMPLEMENTED = 501 diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py new file mode 100644 index 000000000..9df57528f --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py @@ -0,0 +1,226 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import json, libyang, logging +import urllib.parse +from typing import Dict, List, Optional, Set + + +LOGGER = logging.getLogger(__name__) + + +def walk_schema(node : libyang.SNode, path : str = '') -> Set[str]: + current_path = f'{path}/{node.name()}' + schema_paths : Set[str] = {current_path} + for child in node.children(): + if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue + schema_paths.update(walk_schema(child, current_path)) + return schema_paths + +def extract_schema_paths(yang_module : libyang.Module) -> Set[str]: + schema_paths : Set[str] = set() + for node in yang_module.children(): + schema_paths.update(walk_schema(node)) + return schema_paths + +class YangHandler: + def __init__( + self, yang_search_path : str, yang_module_names : List[str], + yang_startup_data : Dict + ) -> None: + self._yang_context = libyang.Context(yang_search_path) + self._loaded_modules : Set[str] = set() + self._schema_paths : Set[str] = set() + for yang_module_name in yang_module_names: + LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) + yang_module = self._yang_context.load_module(yang_module_name) + yang_module.feature_enable_all() + self._loaded_modules.add(yang_module_name) + self._schema_paths.update(extract_schema_paths(yang_module)) + + self._datastore = self._yang_context.parse_data_mem( + json.dumps(yang_startup_data), fmt='json' + ) + + def destroy(self) -> None: + self._yang_context.destroy() + + def get_schema_paths(self) -> Set[str]: + return self._schema_paths + + def get(self, path : str) -> Optional[str]: + path = self._normalize_path(path) + data = self._datastore.find_path(path) + if data is None: return None + json_data = data.print_mem( + fmt='json', with_siblings=False, pretty=True, + keep_empty_containers=False, include_implicit_defaults=True + ) + return json_data + + def get_xpath(self, xpath : str) -> List[str]: + if not xpath.startswith('/'): xpath = '/' + xpath + items = self._datastore.find_all(xpath) + result = list() + for item in items: + result.append(item.print_mem( + fmt='json', with_siblings=False, pretty=True, + keep_empty_containers=False, include_implicit_defaults=True + )) + return result + + def create(self, path : str, payload : Dict) -> str: + path = self._normalize_path(path) + # TODO: client should not provide identifier of element to be created, add it to subpath + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + + dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( + path, parent=self._datastore, value=dnode_parsed, update=False + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def update(self, path : str, payload : Dict) -> str: + path = self._normalize_path(path) + # NOTE: client should provide identifier of element to be updated + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + + dnode = self._yang_context.create_data_path( + path, 
parent=self._datastore, value=dnode_parsed, update=True + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def delete(self, path : str) -> Optional[str]: + path = self._normalize_path(path) + + # NOTE: client should provide identifier of element to be deleted + + node : libyang.DNode = self._datastore.find_path(path) + if node is None: return None + + LOGGER.info('node = {:s}'.format(str(node))) + json_data = str(node.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + )) + LOGGER.info('json_data = {:s}'.format(json_data)) + + node.unlink() + node.free() + + return json_data + + def _normalize_path(self, path : str) -> str: + """ + Normalize RESTCONF path segments using the standard `list=` + syntax into the libyang bracketed predicate form expected by + the datastore (e.g. `network="admin"` -> `network[network-id="admin"]`). + + This implementation looks up the schema node for the list and + uses its key leaf names to build the proper predicates. If the + schema information is unavailable, it falls back to using the + list name as the key name. + """ + + # URL-decode each path segment so escaped characters like `%22` + # (double quotes) are properly handled when parsing list keys. + parts = [urllib.parse.unquote(p) for p in path.strip('/').split('/') if p != ''] + schema_path = '' + out_parts: List[str] = [] + + for part in parts: + if '=' in part: + # split into name and value (value may contain commas/quotes) + name, val = part.split('=', 1) + # keep original name (may include prefix) for output, but + # use local name (without module prefix) to lookup schema + local_name = name.split(':', 1)[1] if ':' in name else name + schema_path = schema_path + '/' + local_name if schema_path else '/' + local_name + schema_nodes = list(self._yang_context.find_path(schema_path)) + if len(schema_nodes) != 1: + MSG = 'No/Multiple SchemaNodes({:s}) for SchemaPath({:s})' + raise Exception(MSG.format( + str([repr(sn) for sn in schema_nodes]), schema_path + )) + schema_node = schema_nodes[0] + + # parse values splitting on commas outside quotes + values = [] + cur = '' + in_quotes = False + for ch in val: + if ch == '"': + in_quotes = not in_quotes + cur += ch + elif ch == ',' and not in_quotes: + values.append(cur) + cur = '' + else: + cur += ch + if cur != '': + values.append(cur) + + # determine key names from schema_node if possible + key_names = None + if isinstance(schema_node, libyang.SList): + key_names = [k.name() for k in schema_node.keys()] + #if isinstance(keys, (list, tuple)): + # key_names = keys + #elif isinstance(keys, str): + # key_names = [kn for kn in k.split() if kn] + #else: + # MSG = 'Unsupported keys format: {:s} / {:s}' + # raise Exception(MSG.format(str(type(keys)), str(keys))) + #elif hasattr(schema_node, 'key'): + # k = schema_node.key() + # if isinstance(k, str): + # key_names = [kn for kn in k.split() if kn] + + if not key_names: + # fallback: use the local list name as the single key + key_names = [local_name] + + # build predicate(s) + preds = [] + for idx, kn in enumerate(key_names): + kv = values[idx] if idx < len(values) else values[0] + preds.append(f'[{kn}="{kv}"]') + + out_parts.append(name + ''.join(preds)) + else: + local_part = part.split(':', 1)[1] if ':' in part else part + schema_path = schema_path + '/' 
+ local_part if schema_path else '/' + local_part + out_parts.append(part) + + return '/' + '/'.join(out_parts) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py new file mode 100644 index 000000000..f31305280 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py @@ -0,0 +1,195 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, re +from collections import defaultdict +from graphlib import TopologicalSorter, CycleError +from pathlib import Path +from typing import Dict, List, Optional, Set, Tuple + + +COMMENT_SINGLE_RE = re.compile(r"//.*?$", re.MULTILINE) +COMMENT_MULTI_RE = re.compile(r"/\*.*?\*/", re.DOTALL) + +# module / submodule name +MODNAME_RE = re.compile(r"\b(module|submodule)\s+([A-Za-z0-9_.-]+)\s*\{") + +# import foo { ... } (most common form) +IMPORT_BLOCK_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*\{", re.IGNORECASE) + +# import foo; (very rare, but we’ll support it) +IMPORT_SEMI_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*;", re.IGNORECASE) + + +def _parse_yang_file(path: Path) -> Tuple[Optional[str], Set[str]]: + path_stem = path.stem # file name without extension + expected_module_name = path_stem.split('@', 1)[0] + + try: + data = path.read_text(encoding='utf-8', errors='ignore') + except Exception: + data = path.read_bytes().decode('utf-8', errors='ignore') + + data = COMMENT_MULTI_RE.sub('', data) + data = COMMENT_SINGLE_RE.sub('', data) + + match = MODNAME_RE.search(data) + if match is None: + return None, set() + module_name = match.group(2) + if module_name != expected_module_name: + MSG = 'Module({:s}) mismatches its FileName({:s})' + raise Exception(MSG.format(str(module_name), str(expected_module_name))) + + module_imports = set() + if module_name is not None: + module_imports.update(IMPORT_BLOCK_RE.findall(data)) + module_imports.update(IMPORT_SEMI_RE.findall(data)) + + # ignore modules importing themselves, just in case + module_imports.discard(module_name) + + return module_name, module_imports + + +class YangModuleDiscoverer: + def __init__(self, yang_search_path : str) -> None: + self._yang_search_path = yang_search_path + + self._module_to_paths : Dict[str, List[Path]] = defaultdict(list) + self._module_to_imports : Dict[str, Set[str]] = defaultdict(set) + self._ordered_module_names : Optional[List[str]] = None + + + def run( + self, do_print_order : bool = False, do_log_order : bool = False, + logger : Optional[logging.Logger] = None, level : int = logging.INFO + ) -> List[str]: + if self._ordered_module_names is None: + self._scan_modules() + self._sort_modules() + + if do_print_order: + self.print_order() + + if do_log_order: + if logger is None: logger = logging.getLogger(__name__) + self.log_order(logger, level=level) + + return self._ordered_module_names + + def _scan_modules(self) -> None: + yang_root = 
Path(self._yang_search_path).resolve() + if not yang_root.exists(): + MSG = 'Path({:s}) not found' + raise Exception(MSG.format(str(self._yang_search_path))) + + for yang_path in yang_root.rglob('*.yang'): + module_name, module_imports = _parse_yang_file(yang_path) + if module_name is None: continue + self._module_to_paths[module_name].append(yang_path) + self._module_to_imports[module_name] = module_imports + + if len(self._module_to_paths) == 0: + MSG = 'No modules found in Path({:s})' + raise Exception(MSG.format(str(self._yang_search_path))) + + self._check_duplicated_module_declaration() + self._check_missing_modules() + + + def _check_duplicated_module_declaration(self) -> None: + duplicate_module_declarations : List[str] = list() + for module_name, paths in self._module_to_paths.items(): + if len(paths) == 1: continue + str_paths = [str(p) for p in paths] + duplicate_module_declarations.append( + ' {:s} => {:s}'.format(module_name, str_paths) + ) + + if len(duplicate_module_declarations) > 0: + MSG = 'Duplicate module declarations:\n{:s}' + str_dup_mods = '\n'.join(duplicate_module_declarations) + raise Exception(MSG.format(str_dup_mods)) + + + def _check_missing_modules(self) -> None: + local_module_names = set(self._module_to_imports.keys()) + missing_modules : List[str] = list() + for module_name, imported_modules in self._module_to_imports.items(): + missing = imported_modules.difference(local_module_names) + if len(missing) == 0: continue + missing_modules.append( + ' {:s} => {:s}'.format(module_name, str(missing)) + ) + + if len(missing_modules) > 0: + MSG = 'Missing modules:\n{:s}' + str_mis_mods = '\n'.join(missing_modules) + raise Exception(MSG.format(str_mis_mods)) + + + def _sort_modules(self) -> None: + ts = TopologicalSorter() + for module_name, imported_modules in self._module_to_imports.items(): + ts.add(module_name, *imported_modules) + + try: + self._ordered_module_names = list(ts.static_order()) # raises CycleError on cycles + except CycleError as e: + cycle = list(dict.fromkeys(e.args[1])) # de-dup while preserving order + MSG = 'Circular dependencies between modules: {:s}' + raise Exception(MSG.format(str(cycle))) # pylint: disable=raise-missing-from + + + def dump_order(self) -> List[Tuple[int, str, List[str]]]: + if self._ordered_module_names is None: + raise Exception('First process the YANG Modules running method .run()') + + module_order : List[Tuple[int, str, List[str]]] = list() + for i, module_name in enumerate(self._ordered_module_names, 1): + module_imports = sorted(self._module_to_imports[module_name]) + module_order.append((i, module_name, module_imports)) + + return module_order + + + def print_order(self) -> None: + print('Ordered Modules:') + for i, module_name, module_imports in self.dump_order(): + MSG = '{:2d} : {:s} => {:s}' + print(MSG.format(i, module_name, str(module_imports))) + + + def log_order(self, logger : logging.Logger, level : int = logging.INFO) -> None: + logger.log(level, 'Ordered Modules:') + for i, module_name, module_imports in self.dump_order(): + MSG = '{:2d} : {:s} => {:s}' + logger.log(level, MSG.format(i, module_name, str(module_imports))) + + +def main() -> None: + logging.basicConfig(level=logging.INFO) + + ymd = YangModuleDiscoverer('./yang') + ordered_module_names = ymd.run( + do_print_order=True, + do_log_order=True + ) + print('ordered_module_names', ordered_module_names) + + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__init__.py 
b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__main__.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__main__.py new file mode 100644 index 000000000..2c84d92ef --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/__main__.py @@ -0,0 +1,26 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from .app import app + +BIND_ADDRESS = '0.0.0.0' +BIND_PORT = 8080 + +if __name__ == '__main__': + # Only used to run it locally during development stage; + # otherwise, app is directly launched by gunicorn. + app.run( + host=BIND_ADDRESS, port=BIND_PORT, debug=True, use_reloader=False + ) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py new file mode 100644 index 000000000..7a87f732a --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py @@ -0,0 +1,112 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
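The app.py module added next ties the mock together: it orders and loads the YANG modules, seeds the libyang datastore from startup.json, mirrors that startup topology into the external controller through simap_client, and exposes the data under /restconf/data. Purely as an illustrative sketch, not part of the patch and assuming the mock listens on localhost:8080 (standalone run), fetching one node by its list key could look like:

import requests

BASE_URL = 'http://localhost:8080/restconf/data'   # assumption: local standalone run
reply = requests.get(
    BASE_URL + '/ietf-network:networks/network=admin/node=OLT',
    headers={'accept': 'application/json'}, timeout=10
)
print(reply.status_code, reply.text)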
+ + import json, logging, secrets + from flask import Flask + from flask_restful import Api + from .Dispatch import RestConfDispatch + from .HostMeta import HostMeta + from .YangHandler import YangHandler + from .YangModelDiscoverer import YangModuleDiscoverer + from .simap_client.RestConfClient import RestConfClient + from .simap_client.SimapClient import SimapClient + + + logging.basicConfig( + level=logging.INFO, + format='[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s', + ) + LOGGER = logging.getLogger(__name__) + logging.getLogger('RestConfClient').setLevel(logging.WARN) + + + RESTCONF_PREFIX = '/restconf' + SECRET_KEY = secrets.token_hex(64) + + + YANG_SEARCH_PATH = './yang' + + ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) + YANG_MODULE_NAMES = ymd.run(do_log_order=True) + + STARTUP_FILE = './startup.json' + with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: + YANG_STARTUP_DATA = json.loads(fp.read()) + + + restconf_client = RestConfClient( + '172.17.0.1', port=8080, + logger=logging.getLogger('RestConfClient') + ) + simap_client = SimapClient(restconf_client) + + te_topo = simap_client.network('admin') + te_topo.update() + + networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) + networks = networks.get('network', list()) + assert len(networks) == 1 + network = networks[0] + assert network['network-id'] == 'admin' + + nodes = network.get('node', list()) + for node in nodes: + node_id = node['node-id'] + tp_ids = [ + tp['tp-id'] + for tp in node['ietf-network-topology:termination-point'] + ] + te_topo.node(node_id).create(termination_point_ids=tp_ids) + + links = network.get('ietf-network-topology:link', list()) + for link in links: + link_id = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_node_id = link_src['source-node'] + link_src_tp_id = link_src['source-tp'] + link_dst_node_id = link_dst['dest-node'] + link_dst_tp_id = link_dst['dest-tp'] + + te_topo.link(link_id).create( + link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id + ) + + + yang_handler = YangHandler( + YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA + ) + restconf_paths = yang_handler.get_schema_paths() + + app = Flask(__name__) + app.config['SECRET_KEY'] = SECRET_KEY + + api = Api(app) + api.add_resource( + HostMeta, + '/.well-known/host-meta', + resource_class_args=(RESTCONF_PREFIX,) + ) + api.add_resource( + RestConfDispatch, + RESTCONF_PREFIX + '/data', + RESTCONF_PREFIX + '/data/', + RESTCONF_PREFIX + '/data/<path:subpath>', + resource_class_args=(yang_handler,) + ) + + LOGGER.info('Available RESTCONF paths:') + for restconf_path in sorted(restconf_paths): + LOGGER.info('- {:s}'.format(str(restconf_path))) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py new file mode 100644 index 000000000..b7c057a70 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py @@ -0,0 +1,191 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Any, Dict, Optional, Set + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], # 200 - OK + requests.codes['CREATED' ], # 201 - Created + requests.codes['ACCEPTED' ], # 202 - Accepted + requests.codes['NO_CONTENT'], # 204 - No Content +} + +def compose_basic_auth( + username : Optional[str] = None, password : Optional[str] = None +) -> Optional[HTTPBasicAuth]: + if username is None or password is None: return None + return HTTPBasicAuth(username, password) + +class SchemeEnum(enum.Enum): + HTTP = 'http' + HTTPS = 'https' + +def check_scheme(scheme : str) -> str: + str_scheme = str(scheme).lower() + enm_scheme = SchemeEnum._value2member_map_[str_scheme] + return enm_scheme.value + +HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' +RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' + +class RestConfClient: + def __init__( + self, address : str, port : int = 8080, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, + logger : Optional[logging.Logger] = None + ) -> None: + self._address = address + self._port = int(port) + self._scheme = check_scheme(scheme) + self._auth = compose_basic_auth(username=username, password=password) + self._base_url = '' + self._timeout = int(timeout) + self._verify_certs = verify_certs + self._allow_redirects = allow_redirects + self._logger = logger + + self._discover_base_url() + + def _discover_base_url(self) -> None: + host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) + host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) + + links = host_meta.get('links') + if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') + if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') + if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') + + link = links[0] + if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') + + rel = link.get('rel') + if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') + if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') + if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') + + href = link.get('href') + if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') + if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') + + self._base_url = str(href + '/data').replace('//', '/') + + def _log_msg_request( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + log_level : int = logging.INFO + ) -> str: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + if self._logger is not None: self._logger.log(log_level, msg) + return msg + + def _log_msg_check_reply( + self, method : RestRequestMethod, request_url : str, body : Optional[Any], + reply : requests.Response, expected_status_codes : Set[int], + log_level : int = 
logging.INFO + ) -> str: + msg = 'Reply: {:s}'.format(str(reply.text)) + if self._logger is not None: self._logger.log(log_level, msg) + http_status_code = reply.status_code + if http_status_code in expected_status_codes: return msg + MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' + msg = MSG.format( + str(method.value).upper(), str(request_url), str(body), + str(http_status_code), str(reply.text) + ) + self._logger.error(msg) + raise Exception(msg) + + def _do_rest_request( + self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = EXPECTED_STATUS_CODES + ) -> Optional[Any]: + candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) + if endpoint.lower().startswith(candidate_schemes): + request_url = endpoint.lstrip('/') + else: + endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') + request_url = '{:s}://{:s}:{:d}/{:s}'.format( + self._scheme, self._address, self._port, endpoint.lstrip('/') + ) + self._log_msg_request(method, request_url, body) + try: + headers = {'accept': 'application/json'} + reply = requests.request( + method.value, request_url, headers=headers, json=body, + auth=self._auth, verify=self._verify_certs, timeout=self._timeout, + allow_redirects=self._allow_redirects + ) + except Exception as e: + MSG = 'Request failed. method={:s} url={:s} body={:s}' + msg = MSG.format(str(method.value).upper(), request_url, str(body)) + self._logger.exception(msg) + raise Exception(msg) from e + self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) + if reply.content and len(reply.content) > 0: return reply.json() + return None + + def get( + self, endpoint : str, + expected_status_codes : Set[int] = {requests.codes['OK']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.GET, endpoint, + expected_status_codes=expected_status_codes + ) + + def post( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.POST, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def put( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PUT, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def patch( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.PATCH, endpoint, body=body, + expected_status_codes=expected_status_codes + ) + + def delete( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} + ) -> Optional[Any]: + return self._do_rest_request( + RestRequestMethod.DELETE, endpoint, body=body, + expected_status_codes=expected_status_codes + ) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py new file mode 100644 index 000000000..b4c27d43a --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py @@ -0,0 +1,242 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) 
(https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Dict, List, Tuple +from .RestConfClient import RestConfClient + + +class TerminationPoint: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tp_id = tp_id + + def create(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network-topology:termination-point'][0] + + def update(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + self._restconf_client.delete(endpoint) + +class Node: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tps : Dict[str, TerminationPoint] = dict() + + def termination_points(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID.format(self._network_id, self._node_id)) + return reply['ietf-network:node'][0].get('ietf-network-topology:termination-point', list()) + + def termination_point(self, tp_id : str) -> TerminationPoint: + _tp =
self._tps.get(tp_id) + if _tp is not None: return _tp + _tp = TerminationPoint(self._restconf_client, self._network_id, self._node_id, tp_id) + return self._tps.setdefault(tp_id, _tp) + + def create( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network:node'][0] + + def update( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + +class Link: + ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link : Dict = self._restconf_client.get(endpoint) + return link['ietf-network-topology:link'][0] + + def update( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': 
{'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + +class Network: + ENDPOINT_NO_ID = '/ietf-network:networks' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + + def __init__(self, restconf_client : RestConfClient, network_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._nodes : Dict[str, Node] = dict() + self._links : Dict[str, Link] = dict() + + def nodes(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Node.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('node', list()) + + def links(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Link.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('ietf-network-topology:link', list()) + + def node(self, node_id : str) -> Node: + _node = self._nodes.get(node_id) + if _node is not None: return _node + _node = Node(self._restconf_client, self._network_id, node_id) + return self._nodes.setdefault(node_id, _node) + + def link(self, link_id : str) -> Link: + _link = self._links.get(link_id) + if _link is not None: return _link + _link = Link(self._restconf_client, self._network_id, link_id) + return self._links.setdefault(link_id, _link) + + def create(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + networks : Dict = self._restconf_client.get(endpoint) + return networks['ietf-network:network'][0] + + def update(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + self._restconf_client.delete(endpoint) + + +class SimapClient: + def __init__(self, restconf_client : RestConfClient) -> None: + self._restconf_client = restconf_client + self._networks : Dict[str, Network] = dict() + + def networks(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Network.ENDPOINT_NO_ID) + return reply['ietf-network:networks'].get('network', list()) + + def network(self, network_id : str) -> Network: + _network = self._networks.get(network_id) + if _network is not None: return _network + _network = Network(self._restconf_client, network_id) + return self._networks.setdefault(network_id, _network) diff --git 
a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/mock_nce_fan_ctrl/requirements.in b/src/tests/tools/mock_nce_fan_ctrl/requirements.in new file mode 100644 index 000000000..17155ed58 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/requirements.in @@ -0,0 +1,25 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cryptography==39.0.1 +eventlet==0.39.0 +Flask-HTTPAuth==4.5.0 +Flask-RESTful==0.3.9 +Flask==2.1.3 +gunicorn==23.0.0 +jsonschema==4.4.0 +libyang==2.8.4 +pyopenssl==23.0.0 +requests==2.27.1 +werkzeug==2.3.7 diff --git a/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh b/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh new file mode 100755 index 000000000..78fe25b9e --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
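The run scripts below launch the mock either under gunicorn or standalone. Once it is up, the simap_client helpers added above can also be driven directly to read or extend the topology; the following is an illustrative sketch only, not part of the patch (the ONT3/L3 identifiers are hypothetical and localhost:8080 is assumed):

from nce_fan_ctrl.simap_client.RestConfClient import RestConfClient
from nce_fan_ctrl.simap_client.SimapClient import SimapClient

simap = SimapClient(RestConfClient('localhost', port=8080))   # address/port assumed
topo = simap.network('admin')
print([node['node-id'] for node in topo.nodes()])
# Hypothetical additions, mirroring what app.py does for the startup topology:
topo.node('ONT3').create(termination_point_ids=['500', '200'])
topo.link('L3').create('ONT3', '500', 'OLT', '501')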
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +export FLASK_ENV=development +gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG nce_fan_ctrl.app:app diff --git a/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh b/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh new file mode 100755 index 000000000..06432851f --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +python -m nce_fan_ctrl diff --git a/src/tests/tools/mock_nce_fan_ctrl/startup.json b/src/tests/tools/mock_nce_fan_ctrl/startup.json new file mode 100644 index 000000000..5c70a2da7 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/startup.json @@ -0,0 +1,49 @@ +{ + "ietf-network:networks": { + "network": [ + { + "network-id": "admin", + "ietf-te-topology:te": { + "name": "admin" + }, + "network-types": { + "ietf-te-topology:te-topology": { + "ietf-eth-te-topology:eth-tran-topology": {} + } + }, + "node": [ + { + "node-id": "ONT1", "ietf-te-topology:te-node-id": "172.16.61.10", + "ietf-te-topology:te": {"te-node-attributes": {"name": "ONT1", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + ] + }, + { + "node-id": "ONT2", "ietf-te-topology:te-node-id": "172.16.61.11", + "ietf-te-topology:te": {"te-node-attributes": {"name": "ONT2", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + ] + }, + { + "node-id": "OLT", "ietf-te-topology:te-node-id": "172.16.58.10", + "ietf-te-topology:te": {"te-node-attributes": {"name": "OLT", "admin-status": "up"}, "oper-status": "up"}, + "ietf-network-topology:termination-point": [ + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, + {"tp-id": "201", "ietf-te-topology:te": {"name": "201"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + ] + } + ], + "ietf-network-topology:link": [ + {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "200"}}, + {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "201"}} + ] + } 
+ ] + } +} diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang new file mode 100644 index 000000000..633d74715 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-service.yang @@ -0,0 +1,1010 @@ +module ietf-eth-tran-service { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-eth-tran-service"; + + prefix "ethtsvc"; + import ietf-yang-types { + prefix "yang"; + reference "RFC 6991 - Common YANG Data Types"; + } + + import ietf-network { + prefix "nw"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-te-types { + prefix "te-types"; + reference "RFC 8776 - Traffic Engineering Common YANG Types"; + } + + import ietf-eth-tran-types { + prefix "etht-types"; + reference "RFC XXXX - A YANG Data Model for Transport + Network Client Signals"; + } + + import ietf-routing-types { + prefix "rt-types"; + reference "RFC 8294 - Common YANG Data Types for the + Routing Area"; + + } + + import ietf-te { + prefix "te"; + reference "RFC YYYY - A YANG Data Model for Traffic + Engineering Tunnels and Interfaces"; + } + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + WG List: + + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Italo Busi (italo.busi@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Anton Snitser (antons@sedonasys.com);0 + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (xufeng.liu.ietf@gmail.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu (yuchaode@huawei.com) + "; + + description + "This module defines a YANG data model for describing + the Ethernet services. The model fully conforms to the + Network Management Datastore Architecture (NMDA). + + Copyright (c) 2021 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision 2023-10-23 { + description + "version -04 as an WG document"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + /* + * Groupings + */ + + grouping vlan-classification { + description + "A grouping which represents classification + on an 802.1Q VLAN tag."; + + leaf tag-type { + type etht-types:eth-tag-classify; + description + "The tag type used for VLAN classification."; + } + choice individual-bundling-vlan { + description + "VLAN based classification can be individual + or bundling."; + + case individual-vlan { + leaf vlan-value { + type etht-types:vlanid; + description + "VLAN ID value."; + } + } + + case vlan-bundling { + leaf vlan-range { + type etht-types:vid-range-type; + description + "List of VLAN ID values."; + } + } + } + } + + grouping vlan-write { + description + "A grouping which represents push/pop operations + of an 802.1Q VLAN tag."; + + leaf tag-type { + type etht-types:eth-tag-type; + description + "The VLAN tag type to push/swap."; + } + leaf vlan-value { + type etht-types:vlanid; + description + "The VLAN ID value to push/swap."; + } +/* + * To be added: this attribute is used when: + * a) the ETH service has only one CoS (as in current version) + * b) as a default when a mapping between a given CoS value + * and the PCP value is not defined (in future versions) + */ + leaf default-pcp { + type uint8 { + range "0..7"; + } + description + "The default Priority Code Point (PCP) value to push/swap"; + } + } + + grouping vlan-operations { + description + "A grouping which represents VLAN operations."; + + leaf pop-tags { + type uint8 { + range "1..2"; + } + description + "The number of VLAN tags to pop (or swap if used in + conjunction with push-tags)"; + } + container push-tags { + description + "The VLAN tags to push (or swap if used in + conjunction with pop-tags)"; + + container outer-tag { + presence + "Indicates existence of the outermost VLAN tag to + push/swap"; + + description + "The outermost VLAN tag to push/swap."; + + uses vlan-write; + } + container second-tag { + must + '../outer-tag/tag-type = "etht-types:s-vlan-tag-type" and ' + + 'tag-type = "etht-types:c-vlan-tag-type"' + { + + error-message + " + When pushing/swapping two tags, the outermost tag must + be specified and of S-VLAN type and the second + outermost tag must be of C-VLAN tag type. + "; + description + " + For IEEE 802.1Q interoperability, when pushing/swapping + two tags, it is required that the outermost tag exists + and is an S-VLAN, and the second outermost tag is a + C-VLAN. 
+ "; + } + + presence + "Indicates existence of a second outermost VLAN tag to + push/swap"; + + description + "The second outermost VLAN tag to push/swap."; + uses vlan-write; + } + } + } + + grouping named-or-value-bandwidth-profile { + description + "A grouping to configure a bandwdith profile either by + referencing a named bandwidth profile or by + configuring the values of the bandwidth profile attributes."; + choice style { + description + "Whether the bandwidth profile is named or defined by value"; + + case named { + description + "Named bandwidth profile."; + leaf bandwidth-profile-name { + type leafref { + path "/ethtsvc:etht-svc/ethtsvc:globals/" + + "ethtsvc:named-bandwidth-profiles/" + + "ethtsvc:bandwidth-profile-name"; + } + description + "Name of the bandwidth profile."; + } + } + case value { + description + "Bandwidth profile configured by value."; + uses etht-types:etht-bandwidth-profiles; + } + } + } + + grouping bandwidth-profiles { + description + "A grouping which represent bandwidth profile configuration."; + + choice direction { + description + "Whether the bandwidth profiles are symmetrical or + asymmetrical"; + case symmetrical { + description + "The same bandwidth profile is used to describe both + the ingress and the egress bandwidth profile."; + container ingress-egress-bandwidth-profile { + description + "The bandwdith profile used in both directions."; + uses named-or-value-bandwidth-profile; + } + } + case asymmetrical { + description + "Ingress and egress bandwidth profiles can be specified."; + container ingress-bandwidth-profile { + description + "The bandwdith profile used in the ingress direction."; + uses named-or-value-bandwidth-profile; + } + container egress-bandwidth-profile { + description + "The bandwdith profile used in the egress direction."; + uses named-or-value-bandwidth-profile; + } + } + } + } + + grouping etht-svc-access-parameters { + description + "ETH services access parameters"; + + leaf access-node-id { + type te-types:te-node-id; + description + "The identifier of the access node in + the ETH TE topology."; + } + + leaf access-node-uri { + type nw:node-id; + description + "The identifier of the access node in the network."; + } + + leaf access-ltp-id { + type te-types:te-tp-id; + description + "The TE link termination point identifier, used + together with access-node-id to identify the + access LTP."; + } + + leaf access-ltp-uri { + type nt:tp-id; + description + "The link termination point identifier in network topology, + used together with access-node-uri to identify the + access LTP."; + } + + leaf access-role { + type identityref { + base etht-types:access-role; + } + description + "Indicate the role of access, e.g., working or protection. "; + } + + container pm-config { + uses pm-config-grouping; + description + "This grouping is used to set the threshold value for + performance monitoring. "; + } + + container state { + config false; + description + "The state is used to monitor the status of service. "; + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + description + "Indicating the operational state of client signal. "; + } + leaf provisioning-state { + type identityref { + base te-types:lsp-state-type; + } + description + "Indicating the provisional state of client signal, + especially when there is a change, i.e., revise, create. 
"; + } + } + + leaf performance { + type identityref { + base etht-types:performance; + } + config false; + description + "Performance Monitoring for the service. "; + } + + } + + grouping etht-svc-tunnel-parameters { + description + "ETH services tunnel parameters."; + choice technology { + description + "Service multiplexing is optional and flexible."; + + case native-ethernet { + /* + placeholder to support proprietary multiplexing + (for further discussion) + */ + list eth-tunnels { + key name; + description + "ETH Tunnel list in native Ethernet scenario."; + uses tunnels-grouping; + } + } + + case frame-base { + list otn-tunnels { + key name; + description + "OTN Tunnel list in Frame-based scenario."; + uses tunnels-grouping; + } + } + + case mpls-tp { + container pw { + description + "Pseudowire information for Ethernet over MPLS-TP."; + uses pw-segment-grouping; + } + } + } + +/* + * Open issue: can we constraints it to be used only with mp services? + */ + leaf src-split-horizon-group { + type string; + description + "Identify a split horizon group at the Tunnel source TTP"; + } + leaf dst-split-horizon-group { + type string; + description + "Identify a split horizon group at the Tunnel destination TTP"; + } + } + + grouping etht-svc-pm-threshold-config { + description + "Configuraiton parameters for Ethernet service PM thresholds."; + + leaf sending-rate-high { + type uint64; + description + "High threshold of packet sending rate in kbps."; + } + leaf sending-rate-low { + type uint64; + description + "Low threshold of packet sending rate in kbps."; + } + leaf receiving-rate-high { + type uint64; + description + "High threshold of packet receiving rate in kbps."; + } + leaf receiving-rate-low { + type uint64; + description + "Low threshold of packet receiving rate in kbps."; + } + } + + grouping etht-svc-pm-stats { + description + "Ethernet service PM statistics."; + + leaf sending-rate-too-high { + type uint32; + description + "Counter that indicates the number of times the + sending rate is above the high threshold"; + } + leaf sending-rate-too-low { + type uint32; + description + "Counter that indicates the number of times the + sending rate is below the low threshold"; + } + leaf receiving-rate-too-high { + type uint32; + description + "Counter that indicates the number of times the + receiving rate is above the high threshold"; + } + leaf receiving-rate-too-low { + type uint32; + description + "Counter that indicates the number of times the + receiving rate is below the low threshold"; + } + } + + grouping etht-svc-instance-config { + description + "Configuraiton parameters for Ethernet services."; + + leaf etht-svc-name { + type string; + description + "Name of the ETH service."; + } + + leaf etht-svc-title { + type string; + description + "The Identifier of the ETH service."; + } + + leaf user-label { + type string; + description + "Alias of the ETH service."; + } + + leaf etht-svc-descr { + type string; + description + "Description of the ETH service."; + } + + leaf etht-svc-customer { + type string; + description + "Customer of the ETH service."; + } + + leaf etht-svc-type { + type etht-types:service-type; + description + "Type of ETH service (p2p, mp2mp or rmp)."; + /* Add default as p2p */ + } + + leaf etht-svc-lifecycle { + type etht-types:lifecycle-status; + description + "Lifecycle state of ETH service."; + /* Add default as installed */ + } + uses te-types:te-topology-identifier; + + uses resilience-grouping; + list etht-svc-end-points { + key 
etht-svc-end-point-name; + description + "The logical end point for the ETH service. "; + uses etht-svc-end-point-grouping; + } + + + container alarm-shreshold { + description "threshold configuration for the E2E client signal"; + uses alarm-shreshold-grouping; + } + + container underlay { + description + "The unterlay tunnel information that carrying the + ETH service. "; + uses etht-svc-tunnel-parameters; + } + + leaf admin-status { + type identityref { + base te-types:tunnel-admin-state-type; + } + default te-types:tunnel-admin-state-up; + description "ETH service administrative state."; + } + } + + grouping etht-svc-instance-state { + description + "State parameters for Ethernet services."; + + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + default te-types:tunnel-state-up; + description "ETH service operational state."; + } + leaf provisioning-state { + type identityref { + base te-types:lsp-state-type; + } + description "ETH service provisioning state."; + } + leaf creation-time { + type yang:date-and-time; + description + "Time of ETH service creation."; + } + leaf last-updated-time { + type yang:date-and-time; + description + "Time of ETH service last update."; + } + + leaf created-by { + type string; + description + "The client signal is created by whom, + can be a system or staff ID."; + } + leaf last-updated-by { + type string; + description + "The client signal is last updated by whom, + can be a system or staff ID."; + } + leaf owned-by { + type string; + description + "The client signal is last updated by whom, + can be a system ID."; + } + container pm-state { + description + "PM data of E2E Ethernet service"; + uses pm-state-grouping; + } + container error-info { + description "error messages of configuration"; + uses error-info-grouping; + } + } + + grouping pm-state-grouping { + leaf latency { + description + "latency value of the E2E Ethernet service"; + type uint32; + units microsecond; + } + } + + grouping error-info-grouping { + leaf error-code { + description "error code"; + type uint16; + } + + leaf error-description { + description "detail message of error"; + type string; + } + + leaf error-timestamp { + description "the date and time error is happened"; + type yang:date-and-time; + } + } + + grouping alarm-shreshold-grouping { + leaf latency-threshold { + description "a threshold for the E2E client signal service's + latency. Once the latency value exceed this threshold, an alarm + should be triggered."; + type uint32; + units microsecond; + } + } + + /* + * Data nodes + */ + + container etht-svc { + description + "ETH services."; + + container globals { + description + "Globals Ethernet configuration data container"; + list named-bandwidth-profiles { + key bandwidth-profile-name; + description + "List of named bandwidth profiles used by + Ethernet services."; + + leaf bandwidth-profile-name { + type string; + description + "Name of the bandwidth profile."; + } + uses etht-types:etht-bandwidth-profiles; + } + } + + list etht-svc-instances { + key etht-svc-name; + description + "The list of p2p ETH service instances"; + + uses etht-svc-instance-config; + + container state { + config false; + description + "Ethernet Service states."; + + uses etht-svc-instance-state; + } + } + } + + grouping resilience-grouping { + description + "Grouping for resilience configuration. 
"; + container resilience { + description + "To configure the data plane protection parameters, + currently a placeholder only, future candidate attributes + include, Revert, WTR, Hold-off Timer, ..."; + uses te:protection-restoration-properties; + } + } + + grouping etht-svc-end-point-grouping { + description + "Grouping for the end point configuration."; + leaf etht-svc-end-point-name { + type string; + description + "The name of the logical end point of ETH service. "; + } + + leaf etht-svc-end-point-id { + type string; + description + "The identifier of the logical end point of ETH service."; + } + + leaf etht-svc-end-point-descr { + type string; + description + "The description of the logical end point of ETH service. "; + } + + leaf topology-role { + type identityref { + base etht-types:topology-role; + } + description + "Indicating the underlay topology role, + e.g., hub,spoke, any-to-any "; + } + + container resilience { + description + "Placeholder for resilience configuration, for future study. "; + } + + list etht-svc-access-points { + key access-point-id; + min-elements "1"; +/* + Open Issue: + Is it possible to limit the max-elements only for p2p services? + max-elements "2"; +*/ + description + "List of the ETH trasport services access point instances."; + + leaf access-point-id { + type string; + description + "ID of the service access point instance"; + } + uses etht-svc-access-parameters; + } + + leaf service-classification-type { + type identityref { + base etht-types:service-classification-type; + } + description + "Service classification type."; + } + + choice service-classification { + description + "Access classification can be port-based or + VLAN based."; + + case port-classification { + /* no additional information */ + } + + case vlan-classification { + container outer-tag { + presence "The outermost VLAN tag exists"; + description + "Classifies traffic using the outermost VLAN tag."; + + uses vlan-classification; + } + container second-tag { + must + '../outer-tag/tag-type = "etht-types:classify-s-vlan" and ' + + 'tag-type = "etht-types:classify-c-vlan"' + { + error-message + " + When matching two tags, the outermost tag must be + specified and of S-VLAN type and the second + outermost tag must be of C-VLAN tag type. + "; + description + " + For IEEE 802.1Q interoperability, when matching two + tags, it is required that the outermost tag exists + and is an S-VLAN, and the second outermost tag is a + C-VLAN. + "; + } + presence "The second outermost VLAN tag exists"; + + description + "Classifies traffic using the second outermost VLAN tag."; + + uses vlan-classification; + } + } + } + +/* + * Open issue: can we constraints it to be used only with mp services? + */ + leaf split-horizon-group { + type string; + description "Identify a split horizon group"; + } + + uses bandwidth-profiles; + + container vlan-operations { + description + "Configuration of VLAN operations."; + choice direction { + description + "Whether the VLAN operations are symmetrical or + asymmetrical"; + case symmetrical { + container symmetrical-operation { + uses vlan-operations; + description + "Symmetrical operations. 
+ Expressed in the ingress direction, but + the reverse operation is applied to egress traffic"; + } + } + case asymmetrical { + container asymmetrical-operation { + description "Asymmetrical operations"; + container ingress { + uses vlan-operations; + description "Ingress operations"; + } + container egress { + uses vlan-operations; + description "Egress operations"; + } + } + } + } + } + } + + grouping pm-config-grouping { + description + "Grouping used for Performance Monitoring Configuration. "; + leaf pm-enable { + type boolean; + description + "Whether to enable the performance monitoring."; + } + + leaf sending-rate-high { + type uint64; + description + "The upperbound of sending rate."; + } + + leaf sending-rate-low { + type uint64; + description + "The lowerbound of sending rate."; + } + + leaf receiving-rate-high { + type uint64; + description + "The upperbound of receiving rate."; + } + + leaf receiving-rate-low { + type uint64; + description + "The lowerbound of receiving rate."; + } + } + + grouping pw-segment-grouping { + description + "Grouping used for PW configuration. "; + leaf pw-id { + type string; + description + "The Identifier information of pseudowire. "; + } + + leaf pw-name { + type string; + description + "The name information of pseudowire."; + } + + leaf transmit-label { + type rt-types:mpls-label; + description + "Transmit label information in PW. "; + } + + leaf receive-label { + type rt-types:mpls-label; + description + "Receive label information in PW. "; + } + + leaf encapsulation-type { + type identityref { + base etht-types:encapsulation-type; + } + description + "The encapsulation type, raw or tag. "; + } + + leaf oper-status { + type identityref { + base te-types:tunnel-state-type; + } + config false; + description + "The operational state of the PW segment. "; + } + + container ingress-bandwidth-profile { + description + "Bandwidth Profile for ingress. "; + uses pw-segment-named-or-value-bandwidth-profile; + } + + list pw-paths { + key path-id; + description + "A list of pw paths. "; + + leaf path-id { + type uint8; + description + "The identifier of pw paths. "; + + } + + list tp-tunnels { + key name; + description + "Names of TP Tunnel underlay"; + leaf name { + type string; + description + "Names of TP Tunnel underlay"; + } + } + } + + } + + grouping pw-segment-named-or-value-bandwidth-profile { + description + "A grouping to configure a bandwdith profile either by + referencing a named bandwidth profile or by + configuring the values of the bandwidth profile attributes."; + choice style { + description + "Whether the bandwidth profile is named or defined by value"; + case named { + description + "Named bandwidth profile."; + leaf bandwidth-profile-name { + type leafref { + path "/ethtsvc:etht-svc/ethtsvc:globals/" + + "ethtsvc:named-bandwidth-profiles/" + + "ethtsvc:bandwidth-profile-name"; + } + description + "Name of the bandwidth profile."; + } + } + case value { + description + "Bandwidth profile configured by value."; + uses etht-types:pw-segement-bandwidth-profile-grouping; + } + } + } + + grouping tunnels-grouping { + description + "A group of tunnels. 
"; + leaf name { + type leafref { + path "/te:te/te:tunnels/te:tunnel/te:name"; + require-instance false; + } + description "Dependency tunnel name"; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description "LSP encoding type"; + reference "RFC3945"; + } + leaf switching-type { + type identityref { + base te-types:switching-capabilities; + } + description "LSP switching type"; + reference "RFC3945"; + } + } +} diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang new file mode 100644 index 000000000..3d152c058 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-eth-tran-types.yang @@ -0,0 +1,460 @@ +module ietf-eth-tran-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-eth-tran-types"; + + prefix "etht-types"; + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + WG List: + + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Italo Busi (italo.busi@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Anton Snitser (antons@sedonasys.com); + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (xufeng.liu.ietf@gmail.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu (yuchaode@huawei.com) + "; + + description + "This module defines the ETH types. + The model fully conforms to the Network Management + Datastore Architecture (NMDA). + + Copyright (c) 2019 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision 2023-10-23 { + description + "version -05 as a WG draft"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + /* + * Identities + */ + + identity eth-vlan-tag-type { + description + "ETH VLAN tag type."; + } + + identity c-vlan-tag-type { + base eth-vlan-tag-type; + description + "802.1Q Customer VLAN"; + } + + identity s-vlan-tag-type { + base eth-vlan-tag-type; + description + "802.1Q Service VLAN (QinQ)"; + } + + identity service-classification-type { + description + "Service classification."; + } + + identity port-classification { + base service-classification-type; + description + "Port classification."; + } + + identity vlan-classification { + base service-classification-type; + description + "VLAN classification."; + } + + identity eth-vlan-tag-classify { + description + "VLAN tag classification."; + } + + identity classify-c-vlan { + base eth-vlan-tag-classify; + description + "Classify 802.1Q Customer VLAN tag. + Only C-tag type is accepted"; + } + + identity classify-s-vlan { + base eth-vlan-tag-classify; + description + "Classify 802.1Q Service VLAN (QinQ) tag. + Only S-tag type is accepted"; + } + + identity classify-s-or-c-vlan { + base eth-vlan-tag-classify; + description + "Classify S-VLAN or C-VLAN tag-classify. 
+ Either tag is accepted"; + } + + identity bandwidth-profile-type { + description + "Bandwidth Profile Types"; + } + + identity mef-10-bwp { + base bandwidth-profile-type; + description + "MEF 10 Bandwidth Profile"; + } + + identity rfc-2697-bwp { + base bandwidth-profile-type; + description + "RFC 2697 Bandwidth Profile"; + } + + identity rfc-2698-bwp { + base bandwidth-profile-type; + description + "RFC 2698 Bandwidth Profile"; + } + + identity rfc-4115-bwp { + base bandwidth-profile-type; + description + "RFC 4115 Bandwidth Profile"; + } + + identity service-type { + description + "Type of Ethernet service."; + } + + identity p2p-svc { + base service-type; + description + "Ethernet point-to-point service (EPL, EVPL)."; + } + + identity rmp-svc { + base service-type; + description + "Ethernet rooted-multitpoint service (E-TREE, EP-TREE)."; + } + + identity mp2mp-svc { + base service-type; + description + "Ethernet multipoint-to-multitpoint service (E-LAN, EP-LAN)."; + } + + identity lifecycle-status { + description + "Lifecycle Status."; + } + + identity installed { + base lifecycle-status; + description + "Installed."; + } + + identity planned { + base lifecycle-status; + description + "Planned."; + } + + identity pending-removal { + base lifecycle-status; + description + "Pending Removal."; + } + + /* + * Type Definitions + */ + + typedef eth-tag-type { + type identityref { + base eth-vlan-tag-type; + } + description + "Identifies a specific ETH VLAN tag type."; + } + + typedef eth-tag-classify { + type identityref { + base eth-vlan-tag-classify; + } + description + "Identifies a specific VLAN tag classification."; + } + + typedef vlanid { + type uint16 { + range "1..4094"; + } + description + "The 12-bit VLAN-ID used in the VLAN Tag header."; + } + + typedef vid-range-type { + type string { + pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" + + "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)"; + } + description + "A list of VLAN Ids, or non overlapping VLAN ranges, in + ascending order, between 1 and 4094. + This type is used to match an ordered list of VLAN Ids, or + contiguous ranges of VLAN Ids. Valid VLAN Ids must be in the + range 1 to 4094, and included in the list in non overlapping + ascending order. 
+ + For example: 1,10-100,50,500-1000"; + } + + typedef bandwidth-profile-type { + type identityref { + base bandwidth-profile-type; + } + description + "Identifies a specific Bandwidth Profile type."; + } + + typedef service-type { + type identityref { + base service-type; + } + description + "Identifies the type of Ethernet service."; + } + + typedef lifecycle-status { + type identityref { + base lifecycle-status; + } + description + "Identifies the lLifecycle Status ."; + } + + /* + * Grouping Definitions + */ + + grouping etht-bandwidth-profiles { + description + "Bandwidth profile configuration paramters."; + + leaf bandwidth-profile-type { + type etht-types:bandwidth-profile-type; + description + "The type of bandwidth profile."; + } + leaf CIR { + type uint64; + description + "Committed Information Rate in Kbps"; + } + leaf CBS { + type uint64; + description + "Committed Burst Size in in KBytes"; + } + leaf EIR { + type uint64; + /* Need to indicate that EIR is not supported by RFC 2697 + + must + '../bw-profile-type = "mef-10-bwp" or ' + + '../bw-profile-type = "rfc-2698-bwp" or ' + + '../bw-profile-type = "rfc-4115-bwp"' + + must + '../bw-profile-type != "rfc-2697-bwp"' + */ + description + "Excess Information Rate in Kbps + In case of RFC 2698, PIR = CIR + EIR"; + } + leaf EBS { + type uint64; + description + "Excess Burst Size in KBytes. + In case of RFC 2698, PBS = CBS + EBS"; + } + leaf color-aware { + type boolean; + description + "Indicates weather the color-mode is + color-aware or color-blind."; + } + leaf coupling-flag { + type boolean; + /* Need to indicate that Coupling Flag is defined only for MEF 10 + + must + '../bw-profile-type = "mef-10-bwp"' + */ + description + "Coupling Flag."; + } + } + + identity topology-role { + description + "The role of underlay topology: e.g., hub, spoke, + any-to-any."; + } + + identity resilience { + description + "Placeholder for resilience information in data plane, + for future study. "; + } + + identity access-role { + description + "Indicating whether the access is a working or protection access."; + } + + identity root-primary { + base access-role; + description + "Designates the primary root UNI of an E-Tree service, and may also + designates the UNI access role of E-LINE and E-LAN service."; + } + + identity root-backup { + base access-role; + description + "Designates the backup root UNI of an E-Tree service."; + } + + identity leaf-access { + base access-role; + description + "Designates the leaf UNI of an E-Tree service."; + } + + identity leaf-edge { + base access-role; + description ""; + } + + identity performance { + description + "Placeholder for performance information, for future study."; + } + + identity encapsulation-type { + description + "Indicating how the service is encapsulated (to PW), e.g, raw or tag. "; + } + grouping pw-segement-bandwidth-profile-grouping { + description + "bandwidth profile grouping for PW segment. 
"; + leaf bandwidth-profile-type { + type etht-types:bandwidth-profile-type; + description + "The type of bandwidth profile."; + } + leaf CIR { + type uint64; + description + "Committed Information Rate in Kbps"; + } + leaf CBS { + type uint64; + description + "Committed Burst Size in in KBytes"; + } + leaf EIR { + type uint64; + /* Need to indicate that EIR is not supported by RFC 2697 + + must + '../bw-profile-type = "mef-10-bwp" or ' + + '../bw-profile-type = "rfc-2698-bwp" or ' + + '../bw-profile-type = "rfc-4115-bwp"' + + must + '../bw-profile-type != "rfc-2697-bwp"' + */ + description + "Excess Information Rate in Kbps + In case of RFC 2698, PIR = CIR + EIR"; + } + leaf EBS { + type uint64; + description + "Excess Burst Size in KBytes. + In case of RFC 2698, PBS = CBS + EBS"; + } + } + grouping eth-bandwidth { + description + "Available bandwith for ethernet."; + leaf eth-bandwidth { + type uint64{ + range "0..10000000000"; + } + units "Kbps"; + description + "Available bandwith value expressed in kilobits per second"; + } + } + + grouping eth-label-restriction { + description + "Label Restriction for ethernet."; + leaf tag-type { + type etht-types:eth-tag-type; + description "VLAN tag type."; + } + leaf priority { + type uint8; + description "priority."; + } + } + grouping eth-label { + description + "Label for ethernet."; + leaf vlanid { + type etht-types:vlanid; + description + "VLAN tag id."; + } + } + + grouping eth-label-step { + description "Label step for Ethernet VLAN"; + leaf eth-step { + type uint16 { + range "1..4095"; + } + default 1; + description + "Label step which represent possible increments for + an Ethernet VLAN tag."; + reference + "IEEE 802.1ad: Provider Bridges."; + } + } +} diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang new file mode 100644 index 000000000..f84cae94c --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-service.yang @@ -0,0 +1,325 @@ +module ietf-trans-client-service { + /* TODO: FIXME */ + yang-version 1.1; + + namespace "urn:ietf:params:xml:ns:yang:ietf-trans-client-service"; + prefix "clntsvc"; + + import ietf-network { + prefix "nw"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference "RFC8345 - A YANG Data Model for Network Topologies"; + } + + import ietf-te-types { + prefix "te-types"; + reference "RFC 8776 - Traffic Engineering Common YANG Types"; + } + + import ietf-layer1-types { + prefix "layer1-types"; + reference "RFC ZZZZ - A YANG Data Model for Layer 1 Types"; + } + + import ietf-yang-types { + prefix "yang"; + reference "RFC 6991 - Common YANG Data Types"; + } + + import ietf-trans-client-svc-types { + prefix "clntsvc-types"; + reference "RFC XXXX - A YANG Data Model for + Transport Network Client Signals"; + } + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Italo Busi (italo.busi@huawei.com); + Anton Snitser (antons@sedonasys.com); + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (Xufeng_Liu@jabil.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu 
(yuchaode@huawei.com); + "; + + description + "This module defines a YANG data model for describing + transport network client services. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2021 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + revision 2023-10-23 { + description + "version -04 as a WG document"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + /* + * Groupings + */ + grouping client-svc-access-parameters { + description + "Transport network client signals access parameters"; + + leaf access-node-id { + type te-types:te-node-id; + description + "The identifier of the access node in the TE topology."; + } + + leaf access-node-uri { + type nw:node-id; + description + "The identifier of the access node in the network."; + } + + leaf access-ltp-id { + type te-types:te-tp-id; + description + "The TE link termination point identifier in TE topology, used + together with access-node-id to identify the access LTP."; + } + + leaf access-ltp-uri { + type nt:tp-id; + description + "The link termination point identifier in network topology, + used together with access-node-uri to identify the access LTP"; + } + + leaf client-signal { + type identityref { + base layer1-types:client-signal; + } + description + "Identify the client signal type associated with this port"; + } + + } + + grouping pm-state-grouping { + leaf latency { + description "latency value of the E2E client signal service"; + type uint32; + units microsecond; + } + } + + grouping error-info-grouping { + leaf error-code { + description "error code"; + type uint16; + } + + leaf error-description { + description "detail message of error"; + type string; + } + + leaf error-timestamp { + description "the date and time error is happened"; + type yang:date-and-time; + } + } + + grouping alarm-shreshold-grouping { + leaf latency-threshold { + description "a threshold for the E2E client signal service's + latency. 
Once the latency value exceed this threshold, an alarm + should be triggered."; + type uint32; + units microsecond; + } + } + + grouping client-svc-tunnel-parameters { + description + "Transport network client signals tunnel parameters"; + + leaf tunnel-name { + type string; + description + "TE tunnel instance name."; + } + } + + grouping client-svc-instance-config { + description + "Configuration parameters for client services."; + leaf client-svc-name { + type string; + description + "Identifier of the p2p transport network client signals."; + } + + leaf client-svc-title { + type string; + description + "Name of the p2p transport network client signals."; + } + + leaf user-label { + type string; + description + "Alias of the p2p transport network client signals."; + } + + leaf client-svc-descr { + type string; + description + "Description of the transport network client signals."; + } + + leaf client-svc-customer { + type string; + description + "Customer of the transport network client signals."; + } + + container resilience { + description "Place holder for resilience functionalities"; + } + + uses te-types:te-topology-identifier; + + leaf admin-status { + type identityref { + base te-types:tunnel-admin-state-type; + } + default te-types:tunnel-admin-state-up; + description "Client signals administrative state."; + } + + container src-access-ports { + description + "Source access port of a client signal."; + uses client-svc-access-parameters; + } + container dst-access-ports { + description + "Destination access port of a client signal."; + uses client-svc-access-parameters; + } + + container pm-state { + config false; + description "PM data of E2E client signal"; + uses pm-state-grouping; + } + + container error-info { + config false; + description "error messages of configuration"; + uses error-info-grouping; + } + + container alarm-shreshold { + description "threshold configuration for the E2E client signal"; + uses alarm-shreshold-grouping; + } + + leaf direction { + type identityref { + base clntsvc-types:direction; + } + description "Uni-dir or Bi-dir for the client signal."; + } + + list svc-tunnels { + key tunnel-name; + description + "List of the TE Tunnels supporting the client signal."; + uses client-svc-tunnel-parameters; + } + } + + grouping client-svc-instance-state { + description + "State parameters for client services."; + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + config false; + description "Client signal operational state."; + } + leaf provisioning-state { + type identityref { + base te-types:lsp-state-type; + } + config false; + description "Client signal provisioning state."; + } + leaf creation-time { + type yang:date-and-time; + config false; + description "The time of the client signal be created."; + } + leaf last-updated-time { + type yang:date-and-time; + config false; + description "The time of the client signal's latest update."; + } + leaf created-by { + type string; + config false; + description + "The client signal is created by whom, + can be a system or staff ID."; + } + leaf last-updated-by { + type string; + config false; + description + "The client signal is last updated by whom, + can be a system or staff ID."; + } + leaf owned-by { + type string; + config false; + description + "The client signal is owned by whom, + can be a system ID."; + } + } + + /* + * Data nodes + */ + + container client-svc { + description + "Transport client services."; + + list client-svc-instances { + key client-svc-name; + 
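+      /*
+       * Editorial note (not part of the published draft text): as a
+       * minimal sketch, a RESTCONF JSON (RFC 7951) instance of this
+       * list -- using hypothetical service, node and tunnel names --
+       * might look like:
+       *
+       *   { "ietf-trans-client-service:client-svc": {
+       *       "client-svc-instances": [ {
+       *         "client-svc-name": "client-signal-1",
+       *         "admin-status": "ietf-te-types:tunnel-admin-state-up",
+       *         "src-access-ports": { "access-node-uri": "node-A" },
+       *         "dst-access-ports": { "access-node-uri": "node-B" },
+       *         "svc-tunnels": [ { "tunnel-name": "otn-tunnel-1" } ]
+       *       } ] } }
+       */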
description + "The list of p2p transport client service instances"; + + uses client-svc-instance-config; + uses client-svc-instance-state; + } + } +} diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang new file mode 100644 index 000000000..925511735 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-client-signal-yang-10/ietf-trans-client-svc-types.yang @@ -0,0 +1,63 @@ +module ietf-trans-client-svc-types { + namespace "urn:ietf:params:xml:ns:yang:ietf-trans-client-svc-types"; + prefix "clntsvc-types"; + + organization + "Internet Engineering Task Force (IETF) CCAMP WG"; + contact + " + ID-draft editor: + Haomian Zheng (zhenghaomian@huawei.com); + Aihua Guo (aihuaguo.ietf@gmail.com); + Italo Busi (italo.busi@huawei.com); + Anton Snitser (antons@sedonasys.com); + Francesco Lazzeri (francesco.lazzeri@ericsson.com); + Yunbin Xu (xuyunbin@caict.ac.cn); + Yang Zhao (zhaoyangyjy@chinamobile.com); + Xufeng Liu (Xufeng_Liu@jabil.com); + Giuseppe Fioccola (giuseppe.fioccola@huawei.com); + Chaode Yu (yuchaode@huawei.com); + "; + + description + "This module defines a YANG data model for describing + transport network client types. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2019 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision 2023-10-23 { + description + "version -01 as a WG document"; + reference + "draft-ietf-ccamp-client-signal-yang"; + } + + identity direction { + description + "Direction information of Client Signal."; + } + + identity bidirectional { + base direction; + description + "Client Signal is bi-directional."; + } + + identity unidirectional { + base direction; + description + "Client Signal is uni-directional."; + } + +} diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang new file mode 100644 index 000000000..a04eb213d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-eth-client-te-topo-yang-09/ietf-eth-te-topology.yang @@ -0,0 +1,2278 @@ +module ietf-eth-te-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-eth-te-topology"; + prefix "etht"; + + import ietf-network { + prefix "nw"; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-te-topology { + prefix "tet"; + reference + "RFC 8795: YANG Data Model for Traffic Engineering + (TE) Topologies"; + } + + import ietf-yang-types { + prefix "yang"; + reference + "RFC 6991: Common YANG Data Types"; + } + + import ietf-eth-tran-types { + prefix "etht-types"; + reference + "RFC YYYY: A YANG Data Model for Transport Network Client + Signals"; + } + // RFC Ed.: replace YYYY with actual RFC number, update date + // information and remove this note + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: + WG List: + + Editor: Haomian Zheng + + + Editor: Italo Busi + + + Editor: Aihua Guo + + + Editor: Yunbin Xu + + + Editor: Yang Zhao + + + Editor: Xufeng Liu + "; + + description + "This module defines a YANG data model for describing + layer-2 Ethernet transport topologies. The model fully + conforms to the Network Management Datastore + Architecture (NMDA). + + Copyright (c) 2023 IETF Trust and the persons identified + as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. 
+ + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here."; + + revision 2023-09-28 { + description + "Initial Revision"; + reference + "RFC XXXX: A YANG Data Model for Ethernet TE Topology"; + // RFC Ed.: replace XXXX with actual RFC number, update date + // information and remove this note + } + + /* + * Groupings + */ + + grouping label-range-info { + description + "Ethernet technology-specific label range related + information with a presence container indicating that the + label range is an Ethernet technology-specific label range. + + This grouping SHOULD be used together with the + eth-label and eth-label-step groupings to provide Ethernet + technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + + container ethernet-label-range { + presence + "Indicates the label range is an Ethernet label range. + + This container must not be present if there are other + presence containers or attributes indicating another type + of label range."; + description + "Ethernet-specific label range related information."; + + uses etht-types:eth-label-restriction; + } + } + + grouping eth-tran-topology-type { + description + "Identifies the Ethernet Transport topology type"; + + container eth-tran-topology { + presence "indicates a topology type of + Ethernet Transport Network."; + description "Eth transport topology type"; + } + } + + grouping ltp-bandwidth-profiles { + description + "A grouping which represents the bandwidth profile(s) + for the ETH LTP."; + + choice direction { + description + "Whether the bandwidth profiles are symmetrical or + asymmetrical"; + case symmetrical { + description + "The same bandwidth profile is used to describe the ingress + and the egress bandwidth profile."; + + container ingress-egress-bandwidth-profile { + description + "The bandwith profile used in the ingress and egress + direction."; + uses etht-types:etht-bandwidth-profiles; + } + } + case asymmetrical { + description + "Different ingress and egress bandwidth profiles + can be specified."; + container ingress-bandwidth-profile { + description + "The bandwidth profile used in the ingress direction."; + uses etht-types:etht-bandwidth-profiles; + } + container egress-bandwidth-profile { + description + "The bandwidth profile used in the egress direction."; + uses etht-types:etht-bandwidth-profiles; + } + } + } + } + + grouping eth-ltp-attributes { + description + "Ethernet transport Link Termination Point (LTP) attributes"; + + leaf ltp-mac-address { + type yang:mac-address; + description + "The MAC address of the Ethernet LTP."; + } + leaf port-vlan-id { + type etht-types:vlanid; + description + "The Port VLAN ID of the Ethernet LTP."; + reference + "IEEE 802.1Q: Virtual Bridged Local Area Networks"; + } + leaf maximum-frame-size { + type uint16 { + range "64 .. 
65535"; + } + description + "Maximum frame size"; + reference + "IEEE 802.1Q: Virtual Bridged Local Area Networks"; + } + uses ltp-bandwidth-profiles; + } + + grouping svc-vlan-classification { + description + "Grouping defining the capabilities for VLAN classification."; + + leaf-list supported-tag-types { + type etht-types:eth-tag-classify; + description + "List of VLAN tag types that can be used for the VLAN + classification. In case VLAN classification is not + supported, the list is empty."; + } + leaf vlan-bundling { + type boolean; + description + "In case VLAN classification is supported, indicates whether + VLAN bundling classification is also supported."; + reference + "MEF 10.3: Ethernet Services Attributes Phase 3"; + } + leaf vlan-range { + type etht-types:vid-range-type; + description + "In case VLAN classification is supported, indicates the + of available VLAN ID values."; + } + } + + grouping svc-vlan-push { + description + "Grouping defining the capabilities for VLAN push or swap + operations."; + + leaf-list supported-tag-types { + type etht-types:eth-tag-type; + description + "List of VLAN tag types that can be used to push or swap a + VLAN tag. In case VLAN push/swap is not supported, the list + is empty."; + reference + "IEEE 802.1Q: Virtual Bridged Local Area Networks"; + } + leaf vlan-range { + type etht-types:vid-range-type; + description + "In case VLAN push/swap operation is supported, the range + of available VLAN ID values."; + } + } + + grouping eth-svc-attributes { + description + "Ethernet Link Termination Point (LTP) service attributes."; + + container supported-classification { + description + "Service classification capability supported by the + Ethernet Link Termination Point (LTP)."; + + leaf port-classification { + type boolean; + description + "Indicates that the ETH LTP support port-based service + classification."; + } + container vlan-classification { + description + "Service classification capabilities based on the VLAN + tag(s) supported by the ETH LTP."; + + leaf vlan-tag-classification { + type boolean; + description + "Indicates that the ETH LTP supports VLAN service + classification."; + } + container outer-tag { + description + "Service classification capabilities based on the outer + VLAN tag, supported by the ETH LTP."; + uses svc-vlan-classification; + } + container second-tag { + description + "Service classification capabilities based on the second + VLAN tag, supported by the ETH LTP."; + leaf second-tag-classification { + type boolean; + must ". 
= 'false' or " + + "../../vlan-tag-classification = 'true'" { + description + "VLAN service classification based on the second + VLAN tag can be supported only when VLAN service + classification"; + } + description + "Indicates that the ETH LTP support VLAN service + classification based on the second VLAN tag."; + } + uses svc-vlan-classification; + } + } + } + + container supported-vlan-operations { + description + "Reports the VLAN operations supported by the ETH LTP."; + + leaf asymmetrical-operations { + type boolean; + description + "Indicates whether the ETH LTP supports also asymmetrical + VLAN operations.It is assumed that symmetrical VLAN + operations are alwyas supported."; + } + leaf transparent-vlan-operations { + type boolean; + description + "Indicates that the ETH LTP supports transparent + operations."; + } + container vlan-pop { + description + "Indicates VLAN pop or swap operations capabilities."; + + leaf vlan-pop-operations { + type boolean; + description + "Indicates that the ETH LTP supports VLAN pop or + swap operations."; + } + leaf max-pop-tags { + type uint8 { + range "1..2"; + } + description + "Indicates the maximum number of tags that can be + popped/swapped."; + } + } + container vlan-push { + description + "Indicates VLAN push or swap operations capabilities."; + + leaf vlan-push-operation { + type boolean; + description + "Indicates that the ETH LTP supports VLAN push or + swap operations."; + } + container outer-tag { + description + "Indicates the supported VLAN operation capabilities + on the outer VLAN tag."; + uses svc-vlan-push; + } + container second-tag { + description + "Indicates the supported VLAN operation capabilities + on the second VLAN tag."; + leaf push-second-tag { + type boolean; + description + "Indicates that the ETH LTP supports VLAN push or swap + operations for the second VLAN tag."; + } + uses svc-vlan-push; + } + } + } + } + + /* + * Data nodes + */ + + augment "/nw:networks/nw:network/nw:network-types/" + + "tet:te-topology" { + description + "Augment network types to include ETH transport newtork"; + + uses eth-tran-topology-type; + } + + augment "/nw:networks/nw:network/nw:node/tet:te" + + "/tet:te-node-attributes" { + when "../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description "Augment only for Ethernet transport network."; + } + description "Augment TE node attributes."; + container eth-node { + presence "The TE node is an Ethernet node."; + description + "Presence container used only to indicate that the TE node + is an Ethernet node."; + } + } + + augment "/nw:networks/nw:network/nt:link" { + when "../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description "Augment only for Ethernet transport network."; + } + description "Augment link configuration"; + + container eth-svc { + presence + "When present, indicates that the Link supports Ethernet + client signals."; + description + "Presence container used only to indicate that the link + supports Ethernet client signals."; + } + } + + augment "/nw:networks/nw:network/nw:node/nt:termination-point" { + when "../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description "Augment only for Ethernet transport network."; + } + description + "Augment ETH LTP attributes"; + + container eth-svc { + presence + "When present, indicates that the Link Termination Point + (LTP) supports Ethernet client signals."; + description + "ETH LTP Service attributes."; + + uses eth-svc-attributes; + } + container 
eth-link-tp { + description + "Attributes of the Ethernet Link Termination Point (LTP)."; + uses eth-ltp-attributes; + } + } + + /* + * Augment TE bandwidth + */ + + augment "/nw:networks/nw:network/nw:node/nt:termination-point/" + + "tet:te/" + + "tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum LSP TE bandwidth for the link termination + point (LTP)."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices information source."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry information source"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:client-layer-adaptation/tet:switching-capability/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment client TE bandwidth of the tunnel termination point + (TTP)"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/tet:path-constraints/" + + 
"tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/tet:path-constraints/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link."; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum TE bandwidth for the TE link"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment unreserved TE bandwidth for the TE Link"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link + information source"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + 
description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum TE bandwidth for the TE link + information source"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link + information-source"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment unreserved TE bandwidth of the TE link + information source"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + description + "Augment maximum LSP TE bandwidth of the TE link + template"; + case eth { + uses etht-types:eth-bandwidth; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum TE bandwidth the TE link template"; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum reservable TE bandwidth for the TE link + template."; + uses etht-types:eth-bandwidth; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment unreserved TE bandwidth the TE link template"; + uses etht-types:eth-bandwidth; + } + + /* + * Augment TE label range information + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + 
"tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TTP + Local Link Connectivities."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TTP + Local Link Connectivity entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE link."; + uses label-range-info; + } + + augment 
"/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range information for the TE link + information source."; + uses label-range-info; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + description + "Augment TE label range information for the TE link template."; + uses label-range-info; + } + + /* + * Augment TE label. + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE node + connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE node + connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-step/" + + "tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE node + connectivity matrices"; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:underlay/tet:primary-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path of the + TE node connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path of the + TE node connectivity 
matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity + matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity + matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices"; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + 
"tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + 
description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE node connectivity + matrices information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE node connectivity + matrices information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment 
"/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE node connectivity + matrices information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE node connectivity matrices of the information + source entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE node connectivity matrices of the information + source entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity matrices + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity matrices + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + 
"tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for 
networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet 
topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology"{ + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology"{ + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivities."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when 
"../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivities."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + 
"Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet 
topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivity entry."; + case eth { + uses etht-types:eth-label; + } + } + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE link."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE link."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + 
"tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range start for the TE link + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range end for the TE link + information source."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "etht:eth-tran-topology" { + description + "Augmentation parameters apply only for networks with + Ethernet topology type."; + } + description + "Augment TE label range step for the TE link + information source."; + case eth { + uses etht-types:eth-label-step; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay primary path + of the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay backup path + of the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + description + "Augment TE label range start for the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + description + "Augment TE label range end for the TE link template."; + case eth { + uses etht-types:eth-label; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + description + "Augment TE label range step for the TE link template."; + case eth { + uses etht-types:eth-label-step; + } + } + +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang new file mode 100644 index 000000000..15e7ac508 --- /dev/null +++ 
b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20/ietf-otn-topology.yang @@ -0,0 +1,2230 @@ +module ietf-otn-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-otn-topology"; + prefix "otnt"; + + import ietf-network { + prefix "nw"; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-te-topology { + prefix "tet"; + reference + "RFC 8795: YANG Data Model for Traffic Engineering + (TE) Topologies"; + } + + import ietf-layer1-types { + prefix "l1-types"; + reference + "RFC YYYY: A YANG Data Model for Layer 1 Types"; + } + // RFC Editor: replace YYYY with actual RFC number assigned to + // [I-D.ietf-ccamp-layer1-types] and remove this note + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: + WG List: + + Editor: Haomian Zheng + + + Editor: Italo Busi + + + Editor: Xufeng Liu + + + Editor: Sergio Belotti + + + Editor: Oscar Gonzalez de Dios + "; + + description + "This module defines a protocol independent Layer 1/ODU topology + data model. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2024 IETF Trust and the persons identified + as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here."; + + revision 2024-06-21 { + description + "Initial Revision"; + reference + "RFC XXXX: A YANG Data Model for Optical Transport Network + Topology"; + } + // RFC Editor: replace XXXX with actual RFC number, update date + // information and remove this note + + /* + * Groupings + */ + + grouping label-range-info { + description + "OTN technology-specific label range related information with + a presence container indicating that the label range is an + OTN technology-specific label range. + + This grouping SHOULD be used together with the + otn-label-start-end and otn-label-step groupings to provide + OTN technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + uses l1-types:otn-label-range-info { + refine otn-label-range { + presence + "Indicates the label range is an OTN label range. 
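
Because the module imports ietf-network, ietf-network-topology, ietf-te-topology and ietf-layer1-types, any tool that compiles it needs those modules (and their own dependencies) on its YANG search path. Below is a minimal sketch using the libyang Python bindings; it is not part of the patch, the search path assumes the imported modules sit in the same directory as this file, and API details may vary between libyang-python releases.

# Sketch only: compile the module and fail loudly if an import cannot be
# resolved. Assumes the imported modules are present in the same directory.
import libyang

YANG_DIR = "src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-ccamp-otn-topo-yang-20"

ctx = libyang.Context(YANG_DIR)
module = ctx.load_module("ietf-otn-topology")  # raises if a dependency is missing
print("compiled:", module)
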
+ + This container MUST NOT be present if there are other + presence containers or attributes indicating another type + of label range."; + } + } + } + + /* + * Data nodes + */ + + augment "/nw:networks/nw:network/nw:network-types/" + + "tet:te-topology" { + container otn-topology { + presence "indicates a topology type of Optical Transport + Network (OTN)-electrical layer."; + description "OTN topology type"; + } + description "augment network types to include OTN."; + } + + augment "/nw:networks/nw:network/nw:node/tet:te" + + "/tet:te-node-attributes" { + when "../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description "Augment only for OTN."; + } + description "Augment TE node attributes."; + container otn-node { + presence "The TE node is an OTN node."; + description + "Introduce new TE node type for OTN node."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes" { + when "../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description "Augment only for OTN."; + } + description "Augment link configuration"; + + container otn-link { + description + "Attributes of the OTN Link."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs set up + on this OTN Link."; + } + leaf tsg { + type identityref { + base l1-types:tributary-slot-granularity; + } + description "Tributary slot granularity."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + leaf distance { + type uint32; + description "distance in the unit of kilometers"; + } + } + container client-svc { + presence + "When present, indicates that the Link supports Constant + Bit Rate (CBR) client signals."; + description + "Attributes of the Link supporting CBR client signals."; + leaf-list supported-client-signal { + type identityref { + base l1-types:client-signal; + } + min-elements 1; + description + "List of client signal types supported by the Link."; + } + } + } + + augment "/nw:networks/nw:network/nw:node/nt:termination-point/" + + "tet:te" { + when "../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description "Augment only for OTN."; + } + description + "Augment link termination point (LTP) configuration."; + + container otn-link-tp { + description + "Attributes of the OTN Link Termination Point (LTP)."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs set up + on this OTN Link Termination Point (LTP)."; + } + } + container client-svc { + presence + "When present, indicates that the Link Termination Point + (LTP) supports Constant Bit Rate (CBR) client signals."; + description + "OTN LTP Service attributes."; + leaf-list supported-client-signal { + type identityref { + base l1-types:client-signal; + } + description + "List of client signal types supported by the LTP."; + } + } + } + + /* + * Augment TE bandwidth + */ + + augment "/nw:networks/nw:network/nw:node/nt:termination-point/" + + "tet:te/" + + "tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters 
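
The containers defined so far (the otn-topology network type, the otn-node marker, and the otn-link / client-svc link attributes) are easiest to read alongside a piece of instance data. The fragment below is an illustrative sketch only, written as a Python dict loosely following the RFC 7951 JSON encoding: the identifiers are invented, the values flagged as placeholders are not checked against ietf-layer1-types, and module-name qualification of nested members is simplified.

# Illustrative instance data (not taken from the patch) for one OTN link.
otn_network = {
    "ietf-network:networks": {
        "network": [{
            "network-id": "otn-net-1",                        # invented id
            "network-types": {
                "ietf-te-topology:te-topology": {
                    "ietf-otn-topology:otn-topology": {}      # presence container
                }
            },
            "node": [{"node-id": "A"}, {"node-id": "B"}],
            "ietf-network-topology:link": [{
                "link-id": "A-to-B",
                "source": {"source-node": "A"},
                "destination": {"dest-node": "B"},
                "ietf-te-topology:te": {
                    "te-link-attributes": {
                        "ietf-otn-topology:otn-link": {
                            "odtu-flex-type": "ODTU4.ts",     # placeholder value
                            "tsg": "tsg-1.25G",               # placeholder identity
                            "distance": 80                    # kilometres
                        },
                        "ietf-otn-topology:client-svc": {
                            # placeholder; real values are identities derived
                            # from l1-types:client-signal
                            "supported-client-signal": ["10GbE"]
                        }
                    }
                }
            }]
        }]
    }
}
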
apply only for networks with + OTN topology type."; + } + description + "Augment maximum LSP TE bandwidth for the link termination + point (LTP)."; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link Termination + Point (LTP) is used to compute the number of Tributary + Slots (TS) required by the ODUflex LSPs set up on this + OTN LTP."; + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay paths of these OTN + connectivity matrices."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay path of this OTN + connectivity matrix entry."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the TE node + connectivity matrices information source."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay paths of these OTN + connectivity matrices."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-constraints/tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only 
for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints of the + connectivity matrix entry information source"; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay path of this OTN + connectivity matrix entry."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:client-layer-adaptation/tet:switching-capability/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment client TE bandwidth of the tunnel termination point + (TTP)"; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + terminated on this OTN Tunnel Termination Point + (TTP)."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/tet:path-constraints/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay paths of these OTN Local + Link Connectivities."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/tet:path-constraints/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE bandwidth path constraints for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-link-bandwidth { + augment otn-bandwidth { + description + "Augment OTN link bandwidth information."; + leaf odtu-flex-type { + type l1-types:odtu-flex-type; + description + "The type of Optical Data Tributary Unit (ODTU) + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by the ODUflex LSPs + set up along the underlay path of this OTN Local + Link Connectivity entry."; + } + } + } + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + 
"tet:interface-switching-capability/tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link."; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum TE bandwidth for the TE link"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment unreserved TE bandwidth for the TE Link"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum LSP TE bandwidth for the TE link + information source"; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum TE bandwidth for the TE link + information 
source"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment maximum reservable TE bandwidth for the TE link + information-source"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment unreserved TE bandwidth of the TE link + information source"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on this OTN Link."; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:interface-switching-capability/" + + "tet:max-lsp-bandwidth/" + + "tet:te-bandwidth/tet:technology" { + description + "Augment maximum LSP TE bandwidth of the TE link + template"; + case otn { + uses l1-types:otn-max-path-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum TE bandwidth the TE link template"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:max-resv-link-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment maximum reservable TE bandwidth for the TE link + template."; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:unreserved-bandwidth/" + + "tet:te-bandwidth" { + description + "Augment unreserved TE bandwidth the TE link template"; + uses l1-types:otn-link-bandwidth { + description + "The odtu-flex-type attribute of the OTN Link is used + to compute the number of Tributary Slots (TS) required + by the ODUflex LSPs set up on the OTN Link that uses this + Link Template."; + } + } + + /* + * Augment TE label range 
information + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE node + connectivity matrices information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the source LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the destination LTP + of the connectivity matrix entry information source."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + 
"Augment TE label range information for the TTP + Local Link Connectivities."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TTP + Local Link Connectivity entry."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE link."; + uses label-range-info; + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction" { + when "../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range information for the TE link + information source."; + uses label-range-info; + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction" { + description + "Augment TE label range information for the TE link template."; + uses label-range-info; + } + + /* + * Augment TE label + */ + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE node + connectivity matrices"; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TE node + connectivity matrices"; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:label-restrictions/" + + "tet:label-restriction/tet:label-step/" + + "tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE node + connectivity matrices"; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + 
"tet:underlay/tet:primary-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path of the + TE node connectivity matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path of the + TE node connectivity matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity + matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity + matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices"; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + 
"otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:from/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:to/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/" 
+ + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/" + + "tet:type/tet:label/tet:label-hop/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/tet:optimizations/" + + "tet:algorithm/tet:metric/tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:te-node-attributes/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + 
"otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE node connectivity + matrices information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TE node connectivity + matrices information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/" + + "tet:connectivity-matrices/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE node connectivity + matrices information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE node connectivity matrices of the information + source entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE node connectivity matrices of the information + source entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TE node connectivity matrices + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + 
"tet:information-source-entry/tet:connectivity-matrices/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TE node connectivity matrices + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TE node connectivity matrices information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the source LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the source LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:from/tet:label-restrictions/" + + "tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the source LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when 
"../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the destination LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the destination LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:to/tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the destination LTP + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment 
TE label hop for the explicit route objects excluded + by the path computation of the connectivity matrix entry + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the connectivity matrix entry + information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:information-source-entry/tet:connectivity-matrices/" + + "tet:connectivity-matrix/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the connectivity matrix entry information source."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/" + + "tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/" + + "tet:te-label/tet:technology"{ + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/" + + "tet:technology"{ + when "../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivities."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + 
"tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivities."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../../../" 
+ + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TTP + Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TTP Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TTP Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-exclude-objects/" + + "tet:route-object-exclude-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects excluded + by the path computation of the TTP Local Link + 
Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:optimizations/tet:algorithm/tet:metric/" + + "tet:optimization-metric/" + + "tet:explicit-route-include-objects/" + + "tet:route-object-include-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the explicit route objects included + by the path computation of the TTP Local Link + Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nw:node/tet:te/" + + "tet:tunnel-termination-point/" + + "tet:local-link-connectivities/" + + "tet:local-link-connectivity/" + + "tet:path-properties/tet:path-route-objects/" + + "tet:path-route-object/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the computed path route objects + of the TTP Local Link Connectivity entry."; + case otn { + uses l1-types:otn-label-hop; + } + } + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay primary path + of the TE link."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + when "../../../../../../../../" + + "nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label hop for the underlay backup path + of the TE link."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE link."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range 
end for the TE link."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE link."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range start for the TE link + information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + when "../../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range end for the TE link + information source."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/nw:network/nt:link/tet:te/" + + "tet:information-source-entry/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + when "../../../../../../nw:network-types/tet:te-topology/" + + "otnt:otn-topology" { + description + "Augmentation parameters apply only for networks with + OTN topology type."; + } + description + "Augment TE label range step for the TE link + information source."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:primary-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay primary path + of the TE link template."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:underlay/tet:backup-path/tet:path-element/tet:type/" + + "tet:label/tet:label-hop/tet:te-label/tet:technology" { + description + "Augment TE label hop for the underlay backup path + of the TE link template."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-start/tet:te-label/tet:technology" { + description + "Augment TE label range start for the TE link template."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + "tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-end/tet:te-label/tet:technology" { + description + "Augment TE label range end for the TE link template."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/nw:networks/tet:te/tet:templates/" + + 
"tet:link-template/tet:te-link-attributes/" + + "tet:label-restrictions/tet:label-restriction/" + + "tet:label-step/tet:technology" { + description + "Augment TE label range step for the TE link template."; + case otn { + uses l1-types:otn-label-step; + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang new file mode 100644 index 000000000..834e78bcd --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-packet-types.yang @@ -0,0 +1,835 @@ +module ietf-te-packet-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-packet-types"; + prefix te-packet-types; + + import ietf-yang-types { + prefix yang; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-te-types { + prefix te-types; + reference + "RFC XXXX: Common YANG Data Types for Traffic Engineering"; + } + + // RFC Editor: replace XXXX with actual RFC number + // and remove this note + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + "; + description + "This YANG module contains a collection of generally useful YANG + data type definitions specific to Packet Traffic Engineering + (TE). + + The model conforms to the Network Management Datastore + Architecture (NMDA). + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here. + + Copyright (c) 2025 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + revision 2025-01-24 { + description + "This revision adds the following new identities: + - bandwidth-profile-type; + - link-metric-delay-variation; + - link-metric-loss; + - path-metric-delay-variation; + - path-metric-loss. + + This revision adds the following new groupings: + - bandwidth-profile-parameters; + - te-packet-path-bandwidth; + - te-packet-link-bandwidth. 
+
+       This revision provides also few editorial changes.";
+    reference
+      "RFC XXXX: Common YANG Data Types for Traffic Engineering";
+  }
+
+  // RFC Editor: replace XXXX with actual RFC number, update date
+  // information and remove this note
+
+  revision 2020-06-10 {
+    description
+      "Latest revision of TE MPLS types.";
+    reference
+      "RFC 8776: Common YANG Data Types for Traffic Engineering";
+  }
+
+  /*
+   * Identities
+   */
+
+  identity bandwidth-profile-type {
+    description
+      "Bandwidth Profile Types";
+  }
+
+  identity mef-10 {
+    base bandwidth-profile-type;
+    description
+      "MEF 10 Bandwidth Profile";
+    reference
+      "MEF 10.3: Ethernet Services Attributes Phase 3";
+  }
+
+  identity rfc-2697 {
+    base bandwidth-profile-type;
+    description
+      "RFC 2697 Bandwidth Profile";
+    reference
+      "RFC 2697: A Single Rate Three Color Marker";
+  }
+
+  identity rfc-2698 {
+    base bandwidth-profile-type;
+    description
+      "RFC 2698 Bandwidth Profile";
+    reference
+      "RFC 2698: A Two Rate Three Color Marker";
+  }
+
+  // Derived identities from te-types:link-metric-type
+
+  identity link-metric-delay-variation {
+    base te-types:link-metric-type;
+    description
+      "The Unidirectional Delay Variation Metric,
+       measured in units of microseconds.";
+    reference
+      "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions,
+       Section 4.3
+       RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions,
+       Section 4.3";
+  }
+
+  identity link-metric-loss {
+    base te-types:link-metric-type;
+    description
+      "The Unidirectional Link Loss Metric,
+       measured in units of 0.000003%.";
+    reference
+      "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions,
+       Section 4.4
+       RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions,
+       Section 4.4";
+  }
+
+  // Derived identities from te-types:link-metric-type
+
+  identity path-metric-delay-variation {
+    base te-types:path-metric-type;
+    description
+      "The Path Delay Variation Metric,
+       measured in units of microseconds.";
+    reference
+      "RFC 8233: Extensions to the Path Computation Element
+       Communication Protocol (PCEP) to Compute
+       Service-Aware Label Switched Paths (LSPs),
+       Section 3.1.2";
+  }
+
+  identity path-metric-loss {
+    base te-types:path-metric-type;
+    description
+      "The Path Loss Metric, measured in units of 0.000003%.";
+    reference
+      "RFC 8233: Extensions to the Path Computation Element
+       Communication Protocol (PCEP) to Compute
+       Service-Aware Label Switched Paths (LSPs),
+       Section 3.1.3";
+  }
+
+  identity backup-protection-type {
+    description
+      "Base identity for the backup protection type.";
+  }
+
+  identity backup-protection-link {
+    base backup-protection-type;
+    description
+      "Backup provides link protection only.";
+  }
+
+  identity backup-protection-node-link {
+    base backup-protection-type;
+    description
+      "Backup offers node (preferred) or link protection.";
+  }
+
+  identity bc-model-type {
+    description
+      "Base identity for the Diffserv-TE Bandwidth Constraints
+       Model type.";
+    reference
+      "RFC 4124: Protocol Extensions for Support of Diffserv-aware
+       MPLS Traffic Engineering";
+  }
+
+  identity bc-model-rdm {
+    base bc-model-type;
+    description
+      "Russian Dolls Bandwidth Constraints Model type.";
+    reference
+      "RFC 4127: Russian Dolls Bandwidth Constraints Model for
+       Diffserv-aware MPLS Traffic Engineering";
+  }
+
+  identity bc-model-mam {
+    base bc-model-type;
+    description
+      "Maximum Allocation Bandwidth Constraints Model type.";
+    reference
+      "RFC 4125: Maximum Allocation Bandwidth Constraints Model for
+       Diffserv-aware MPLS Traffic Engineering";
+  }
+
+  identity bc-model-mar {
+    base bc-model-type;
+    description
+      "Maximum Allocation with Reservation Bandwidth Constraints
+       Model type.";
+    reference
+      "RFC 4126: Max Allocation with Reservation Bandwidth
+       Constraints Model for Diffserv-aware MPLS Traffic
+       Engineering & Performance Comparisons";
+  }
+
+  /*
+   * Typedefs
+   */
+
+  typedef te-bandwidth-requested-type {
+    type enumeration {
+      enum specified-value {
+        description
+          "Bandwidth value is explicitly specified.";
+      }
+      enum specified-profile {
+        description
+          "Bandwidth profile is explicitly specified.";
+      }
+      enum auto {
+        description
+          "Bandwidth is automatically computed.";
+      }
+    }
+    description
+      "Enumerated type for specifying whether bandwidth is
+       explicitly specified or automatically computed.";
+  }
+
+  typedef te-class-type {
+    type uint8;
+    description
+      "Diffserv-TE Class-Type.
+       Defines a set of Traffic Trunks crossing a link that is
+       governed by a specific set of bandwidth constraints.
+
+       Class-Type is used for the purposes of link bandwidth
+       allocation, constraint-based routing, and admission control.";
+    reference
+      "RFC 4124: Protocol Extensions for Support of Diffserv-aware
+       MPLS Traffic Engineering";
+  }
+
+  typedef bc-type {
+    type uint8 {
+      range "0..7";
+    }
+    description
+      "Diffserv-TE bandwidth constraints as defined in RFC 4124.";
+    reference
+      "RFC 4124: Protocol Extensions for Support of Diffserv-aware
+       MPLS Traffic Engineering";
+  }
+
+  typedef bandwidth-kbps {
+    type uint64;
+    units "kilobits per second";
+    description
+      "Bandwidth values, expressed in kilobits per second.";
+  }
+
+  typedef bandwidth-mbps {
+    type uint64;
+    units "megabits per second";
+    description
+      "Bandwidth values, expressed in megabits per second.";
+  }
+
+  typedef bandwidth-gbps {
+    type uint64;
+    units "gigabits per second";
+    description
+      "Bandwidth values, expressed in gigabits per second.";
+  }
+
+  /*
+   * Groupings
+   */
+
+  grouping performance-metrics-attributes-packet {
+    description
+      "Contains Performance Metrics (PM) information.";
+    uses te-types:performance-metrics-attributes {
+      augment "performance-metrics-one-way" {
+        description
+          "Performance Metrics (PM) one-way packet-specific
+           augmentation for a generic PM grouping.";
+        leaf one-way-min-delay {
+          type uint32 {
+            range "0..16777215";
+          }
+          units "microseconds";
+          description
+            "One-way minimum delay or latency.";
+        }
+        leaf one-way-min-delay-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "One-way minimum delay or latency normality.";
+        }
+        leaf one-way-max-delay {
+          type uint32 {
+            range "0..16777215";
+          }
+          units "microseconds";
+          description
+            "One-way maximum delay or latency.";
+        }
+        leaf one-way-max-delay-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "One-way maximum delay or latency normality.";
+        }
+        leaf one-way-delay-variation {
+          type uint32 {
+            range "0..16777215";
+          }
+          units "microseconds";
+          description
+            "One-way delay variation.";
+          reference
+            "RFC 5481: Packet Delay Variation Applicability
+             Statement, Section 4.2";
+        }
+        leaf one-way-delay-variation-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "One-way delay variation normality.";
+          reference
+            "RFC 7471: OSPF Traffic Engineering (TE) Metric
+             Extensions
+             RFC 8570: IS-IS Traffic Engineering (TE) Metric
+             Extensions
+             RFC 7823: Performance-Based Path Selection for
+             Explicitly Routed Label Switched Paths (LSPs)
+             Using TE Metric Extensions";
+        }
+        leaf one-way-packet-loss {
+          type decimal64 {
+            fraction-digits 6;
+            range "0..50.331642";
+          }
+          units "%";
+          description
+            "One-way packet loss as a percentage of the total traffic
+             sent over a configurable interval.
+
+             The finest precision is 0.000003%.";
+          reference
+            "RFC 8570: IS-IS Traffic Engineering (TE) Metric
+             Extensions, Section 4.4";
+        }
+        leaf one-way-packet-loss-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "Packet loss normality.";
+          reference
+            "RFC 7471: OSPF Traffic Engineering (TE) Metric
+             Extensions
+             RFC 8570: IS-IS Traffic Engineering (TE) Metric
+             Extensions
+             RFC 7823: Performance-Based Path Selection for
+             Explicitly Routed Label Switched Paths (LSPs)
+             Using TE Metric Extensions";
+        }
+      }
+      augment "performance-metrics-two-way" {
+        description
+          "Performance Metrics (PM) two-way packet-specific
+           augmentation for a generic PM grouping.";
+        reference
+          "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions
+           RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions
+           RFC 7823: Performance-Based Path Selection for Explicitly
+           Routed Label Switched Paths (LSPs) Using TE
+           Metric Extensions";
+        leaf two-way-min-delay {
+          type uint32 {
+            range "0..16777215";
+          }
+          units "microseconds";
+          default "0";
+          description
+            "Two-way minimum delay or latency.";
+        }
+        leaf two-way-min-delay-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "Two-way minimum delay or latency normality.";
+          reference
+            "RFC 7471: OSPF Traffic Engineering (TE) Metric
+             Extensions
+             RFC 8570: IS-IS Traffic Engineering (TE) Metric
+             Extensions
+             RFC 7823: Performance-Based Path Selection for
+             Explicitly Routed Label Switched Paths (LSPs)
+             Using TE Metric Extensions";
+        }
+        leaf two-way-max-delay {
+          type uint32 {
+            range "0..16777215";
+          }
+          units "microseconds";
+          default "0";
+          description
+            "Two-way maximum delay or latency.";
+        }
+        leaf two-way-max-delay-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "Two-way maximum delay or latency normality.";
+          reference
+            "RFC 7471: OSPF Traffic Engineering (TE) Metric
+             Extensions
+             RFC 8570: IS-IS Traffic Engineering (TE) Metric
+             Extensions
+             RFC 7823: Performance-Based Path Selection for
+             Explicitly Routed Label Switched Paths (LSPs)
+             Using TE Metric Extensions";
+        }
+        leaf two-way-delay-variation {
+          type uint32 {
+            range "0..16777215";
+          }
+          units "microseconds";
+          default "0";
+          description
+            "Two-way delay variation.";
+          reference
+            "RFC 5481: Packet Delay Variation Applicability
+             Statement, Section 4.2";
+        }
+        leaf two-way-delay-variation-normality {
+          type te-types:performance-metrics-normality;
+          default "normal";
+          description
+            "Two-way delay variation normality.";
+          reference
+            "RFC 7471: OSPF Traffic Engineering (TE) Metric
+             Extensions
+             RFC 8570: IS-IS Traffic Engineering (TE) Metric
+             Extensions
+             RFC 7823: Performance-Based Path Selection for
+             Explicitly Routed Label Switched Paths (LSPs)
+             Using TE Metric Extensions";
+        }
+        leaf two-way-packet-loss {
+          type decimal64 {
+            fraction-digits 6;
+            range "0..50.331642";
+          }
+          units "%";
+          default "0";
+          description
+            "Two-way packet loss as a percentage of the total traffic
+             sent over a configurable interval.
+ + The finest precision is 0.000003%."; + } + leaf two-way-packet-loss-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Two-way packet loss normality."; + } + } + } + } + + grouping one-way-performance-metrics-packet { + description + "One-way packet Performance Metrics (PM) throttle grouping."; + leaf one-way-min-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way minimum delay or latency."; + } + leaf one-way-max-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way maximum delay or latency."; + } + leaf one-way-delay-variation { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way delay variation."; + } + leaf one-way-packet-loss { + type decimal64 { + fraction-digits 6; + range "0..50.331642"; + } + units "%"; + default "0"; + description + "One-way packet loss as a percentage of the total traffic + sent over a configurable interval. + + The finest precision is 0.000003%."; + } + } + + grouping one-way-performance-metrics-gauge-packet { + description + "One-way packet Performance Metrics (PM) throttle grouping. + + This grouping is used to report the same metrics defined in + the one-way-performance-metrics-packet grouping, using gauges + instead of uint32 data types and referencing IPPM RFCs + instead of IGP-TE RFCs."; + leaf one-way-min-delay { + type yang:gauge64; + units "microseconds"; + description + "One-way minimum delay or latency."; + } + leaf one-way-max-delay { + type yang:gauge64; + units "microseconds"; + description + "One-way maximum delay or latency."; + reference + "RFC 7679: A One-Way Delay Metric for IP Performance + Metrics (IPPM)"; + } + leaf one-way-delay-variation { + type yang:gauge64; + units "microseconds"; + description + "One-way delay variation."; + reference + "RFC 3393: IP Packet Delay Variation Metric for IP + Performance Metrics (IPPM)"; + } + leaf one-way-packet-loss { + type decimal64 { + fraction-digits 5; + range "0..100"; + } + description + "The ratio of packets dropped to packets transmitted between + two endpoints."; + reference + "RFC 7680: A One-Way Loss Metric for IP Performance + Metrics (IPPM)"; + } + } + + grouping two-way-performance-metrics-packet { + description + "Two-way packet Performance Metrics (PM) throttle grouping."; + leaf two-way-min-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way minimum delay or latency."; + } + leaf two-way-max-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way maximum delay or latency."; + } + leaf two-way-delay-variation { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way delay variation."; + } + leaf two-way-packet-loss { + type decimal64 { + fraction-digits 6; + range "0..50.331642"; + } + units "%"; + default "0"; + description + "Two-way packet loss as a percentage of the total traffic + sent over a configurable interval. + + The finest precision is 0.000003%."; + } + } + + grouping two-way-performance-metrics-gauge-packet { + description + "Two-way packet Performance Metrics (PM) throttle grouping. 
+ + This grouping is used to report the same metrics defined in + the two-way-performance-metrics-packet grouping, using gauges + instead of uint32 data types and referencing IPPM RFCs + instead of IGP-TE RFCs."; + leaf two-way-min-delay { + type yang:gauge64; + units "microseconds"; + description + "Two-way minimum delay or latency."; + reference + "RFC 2681: A Round-trip Delay Metric for IPPM"; + } + leaf two-way-max-delay { + type yang:gauge64; + units "microseconds"; + description + "Two-way maximum delay or latency."; + reference + "RFC 2681: A Round-trip Delay Metric for IPPM"; + } + leaf two-way-delay-variation { + type yang:gauge64; + units "microseconds"; + description + "Two-way delay variation."; + reference + "RFC 5481: Packet Delay Variation Applicability Statement"; + } + leaf two-way-packet-loss { + type decimal64 { + fraction-digits 5; + range "0..100"; + } + description + "The ratio of packets dropped to packets transmitted between + two endpoints."; + } + } + + grouping performance-metrics-throttle-container-packet { + description + "Packet Performance Metrics (PM) threshold grouping."; + uses te-types:performance-metrics-throttle-container { + augment "throttle/threshold-out" { + description + "Performance Metrics (PM) threshold-out packet + augmentation for a generic grouping."; + uses one-way-performance-metrics-packet; + uses two-way-performance-metrics-packet; + } + augment "throttle/threshold-in" { + description + "Performance Metrics (PM) threshold-in packet augmentation + for a generic grouping."; + uses one-way-performance-metrics-packet; + uses two-way-performance-metrics-packet; + } + augment "throttle/threshold-accelerated-advertisement" { + description + "Performance Metrics (PM) accelerated advertisement packet + augmentation for a generic grouping."; + uses one-way-performance-metrics-packet; + uses two-way-performance-metrics-packet; + } + } + } + + grouping bandwidth-profile-parameters { + description + "Common parameters to define bandwidth profiles in packet + networks."; + leaf cir { + type uint64; + units "bits per second"; + description + "Committed Information Rate (CIR)."; + } + leaf cbs { + type uint64; + units "bytes"; + description + "Committed Burst Size (CBS)."; + } + leaf eir { + type uint64; + units "bits per second"; + description + "Excess Information Rate (EIR)."; + } + leaf ebs { + type uint64; + units "bytes"; + description + "Excess Burst Size (EBS)."; + } + leaf pir { + type uint64; + units "bits per second"; + description + "Peak Information Rate (PIR)."; + } + leaf pbs { + type uint64; + units "bytes"; + description + "Peak Burst Size (PBS)."; + } + } + + grouping te-packet-path-bandwidth { + description + "Bandwidth attributes for TE Packet paths."; + container packet-bandwidth { + description + "Bandwidth attributes for TE Packet paths."; + leaf specification-type { + type te-bandwidth-requested-type; + description + "The bandwidth specification type, either explicitly + specified or automatically computed."; + } + leaf set-bandwidth { + when "../specification-type = 'specified-value'" { + description + "When the bandwidth value is explicitly specified."; + } + type bandwidth-kbps; + description + "Set the bandwidth value explicitly, e.g., using offline + calculation."; + } + container bandwidth-profile { + when "../specification-type = 'specified-profile'" { + description + "When the bandwidth profile is explicitly specified."; + } + description + "Set the bandwidth profile attributes explicitly."; + leaf bandwidth-profile-name { 
+ type string; + description + "Name of Bandwidth Profile."; + } + leaf bandwidth-profile-type { + type identityref { + base bandwidth-profile-type; + } + description + "Type of Bandwidth Profile."; + } + uses bandwidth-profile-parameters; + } + leaf class-type { + type te-types:te-ds-class; + description + "The Class-Type of traffic transported by the LSP."; + reference + "RFC 4124: Protocol Extensions for Support of + Diffserv-aware MPLS Traffic Engineering, + Section 4.3.1"; + } + leaf signaled-bandwidth { + type te-packet-types:bandwidth-kbps; + config false; + description + "The currently signaled bandwidth of the LSP. + + In the case where the bandwidth is specified + explicitly, then this will match the value of the + set-bandwidth leaf. + + In the cases where the bandwidth is dynamically + computed by the system, the current value of the + bandwidth should be reflected."; + } + } + } + + grouping te-packet-link-bandwidth { + description + "Bandwidth attributes for Packet TE links."; + leaf packet-bandwidth { + type uint64; + units "bits per second"; + description + "Bandwidth value for Packet TE links."; + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang new file mode 100644 index 000000000..aef9434ed --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-rfc8776-update-18/ietf-te-types.yang @@ -0,0 +1,4473 @@ +module ietf-te-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-types"; + prefix te-types; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-yang-types { + prefix yang; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-routing-types { + prefix rt-types; + reference + "RFC 8294: Common YANG Data Types for the Routing Area"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + import ietf-network-topology { + prefix nt; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + "; + description + "This YANG module contains a collection of generally useful + YANG data type definitions specific to TE. + + The model conforms to the Network Management Datastore + Architecture (NMDA). + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here. + + Copyright (c) 2025 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + revision 2025-01-24 { + description + "This revision adds the following new identities: + - lsp-provisioning-error-reason; + - association-type-diversity; + - tunnel-admin-state-auto; + - lsp-restoration-restore-none; + - restoration-scheme-rerouting; + - path-metric-optimization-type; + - link-path-metric-type; + - link-metric-type and its derived identities; + - path-computation-error-reason and its derived identities; + - protocol-origin-type and its derived identities; + - svec-objective-function-type and its derived identities; + - svec-metric-type and its derived identities. + + This revision adds the following new data types: + - path-type. + + This revision adds the following new groupings: + - explicit-route-hop-with-srlg; + - encoding-and-switching-type; + - te-generic-node-id. + + This revision updates the following identities: + - objective-function-type; + - action-exercise; + - path-metric-type; + - path-metric-te; + - path-metric-igp; + - path-metric-hop; + - path-metric-delay-average; + - path-metric-delay-minimum; + - path-metric-residual-bandwidth; + - path-metric-optimize-includes; + - path-metric-optimize-excludes; + - te-optimization-criterion. + + This revision updates the following data types: + - te-node-id. + + This revision updates the following groupings: + - explicit-route-hop: + - adds the following leaves: + - node-id-uri; + - link-tp-id-uri; + - updates the following leaves: + - node-id; + - link-tp-id; + - record-route-state: + - adds the following leaves: + - node-id-uri; + - link-tp-id-uri; + - updates the following leaves: + - node-id; + - link-tp-id; + - optimization-metric-entry: + - updates the following leaves: + - metric-type; + - tunnel-constraints; + - adds the following leaves: + - network-id; + - path-constraints-route-objects: + - updates the following containers: + - explicit-route-objects-always; + - generic-path-metric-bounds: + - updates the following leaves: + - metric-type; + - generic-path-optimization + - adds the following leaves: + - tiebreaker; + - deprecate the following containers: + - tiebreakers. + + This revision obsoletes the following identities: + - of-minimize-agg-bandwidth-consumption; + - of-minimize-load-most-loaded-link; + - of-minimize-cost-path-set; + - lsp-protection-reroute-extra; + - lsp-protection-reroute. 
+ + This revision also provides a few editorial changes."; + reference + "RFC XXXX: Common YANG Data Types for Traffic Engineering"; + } + + // RFC Editor: replace XXXX with actual RFC number, update date + // information and remove this note + + revision 2020-06-10 { + description + "Initial Version of TE types."; + reference + "RFC 8776: Common YANG Data Types for Traffic Engineering"; + } + + /* + * Features + */ + + feature p2mp-te { + description + "Indicates support for Point-to-Multipoint TE (P2MP-TE)."; + reference + "RFC 4875: Extensions to Resource Reservation Protocol - + Traffic Engineering (RSVP-TE) for + Point-to-Multipoint TE Label Switched Paths (LSPs)"; + } + + feature frr-te { + description + "Indicates support for TE Fast Reroute (FRR)."; + reference + "RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP Tunnels"; + } + + feature extended-admin-groups { + description + "Indicates support for TE link extended administrative + groups."; + reference + "RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + + feature named-path-affinities { + description + "Indicates support for named path affinities."; + } + + feature named-extended-admin-groups { + description + "Indicates support for named extended administrative groups."; + } + + feature named-srlg-groups { + description + "Indicates support for named Shared Risk Link Group (SRLG)."; + } + + feature named-path-constraints { + description + "Indicates support for named path constraints."; + } + + feature path-optimization-metric { + description + "Indicates support for path optimization metrics."; + } + + feature path-optimization-objective-function { + description + "Indicates support for path optimization objective functions."; + } + + /* + * Identities + */ + + identity lsp-provisioning-error-reason { + description + "Base identity for LSP provisioning errors."; + } + + identity session-attributes-flags { + description + "Base identity for the RSVP-TE session attributes flags."; + } + + identity local-protection-desired { + base session-attributes-flags; + description + "Local protection is desired."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.7.1"; + } + + identity se-style-desired { + base session-attributes-flags; + description + "Shared explicit style, to allow the LSP to be established + and share resources with the old LSP."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + + identity local-recording-desired { + base session-attributes-flags; + description + "Label recording is desired."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.7.1"; + } + + identity bandwidth-protection-desired { + base session-attributes-flags; + description + "Requests FRR bandwidth protection on LSRs, if present."; + reference + "RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels"; + } + + identity node-protection-desired { + base session-attributes-flags; + description + "Requests FRR node protection on LSRs, if present."; + reference + "RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels"; + } + + identity path-reevaluation-request { + base session-attributes-flags; + description + "This flag indicates that a path re-evaluation (of the + current path in use) is requested. 
+ + Note that this does not trigger any LSP reroutes but + instead just signals a request to evaluate whether a + preferable path exists."; + reference + "RFC 4736: Reoptimization of Multiprotocol Label Switching + (MPLS) Traffic Engineering (TE) Loosely Routed + Label Switched Path (LSP)"; + } + + identity soft-preemption-desired { + base session-attributes-flags; + description + "Soft preemption of LSP resources is desired."; + reference + "RFC 5712: MPLS Traffic Engineering Soft Preemption"; + } + + identity lsp-attributes-flags { + description + "Base identity for LSP attributes flags."; + } + + identity end-to-end-rerouting-desired { + base lsp-attributes-flags; + description + "Indicates end-to-end rerouting behavior for an LSP + undergoing establishment. + + This MAY also be used to specify the behavior of end-to-end + LSP recovery for established LSPs."; + reference + "RFC 4920: Crankback Signaling Extensions for MPLS and GMPLS + RSVP-TE + RFC 5420: Encoding of Attributes for MPLS LSP Establishment + Using Resource Reservation Protocol Traffic + Engineering (RSVP-TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity boundary-rerouting-desired { + base lsp-attributes-flags; + description + "Indicates boundary rerouting behavior for an LSP undergoing + establishment. + + This MAY also be used to specify segment-based LSP recovery + through nested crankback for established LSPs. + + The boundary Area Border Router (ABR) / Autonomous System + Border Router (ASBR) can decide to forward the PathErr + message upstream to either an upstream boundary ABR/ASBR or + the ingress LSR. + + Alternatively, it can try to select another egress boundary + LSR."; + reference + "RFC 4920: Crankback Signaling Extensions for MPLS and GMPLS + RSVP-TE + RFC 5420: Encoding of Attributes for MPLS LSP Establishment + Using Resource Reservation Protocol Traffic + Engineering (RSVP-TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity segment-based-rerouting-desired { + base lsp-attributes-flags; + description + "Indicates segment-based rerouting behavior for an LSP + undergoing establishment. 
+ + This MAY also be used to specify segment-based LSP recovery + for established LSPs."; + reference + "RFC 4920: Crankback Signaling Extensions for MPLS and GMPLS + RSVP-TE + RFC 5420: Encoding of Attributes for MPLS LSP Establishment + Using Resource Reservation Protocol + Traffic Engineering (RSVP-TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity lsp-integrity-required { + base lsp-attributes-flags; + description + "Indicates that LSP integrity is required."; + reference + "RFC 4875: Extensions to Resource Reservation Protocol - + Traffic Engineering (RSVP-TE) for + Point-to-Multipoint TE Label Switched Paths (LSPs) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity contiguous-lsp-desired { + base lsp-attributes-flags; + description + "Indicates that a contiguous LSP is desired."; + reference + "RFC 5151: Inter-Domain MPLS and GMPLS Traffic Engineering -- + Resource Reservation Protocol-Traffic Engineering + (RSVP-TE) Extensions + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity lsp-stitching-desired { + base lsp-attributes-flags; + description + "Indicates that LSP stitching is desired."; + reference + "RFC 5150: Label Switched Path Stitching with Generalized + Multiprotocol Label Switching Traffic Engineering + (GMPLS TE) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity pre-planned-lsp-flag { + base lsp-attributes-flags; + description + "Indicates that the LSP MUST be provisioned in the + control plane only."; + reference + "RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions for + Multi-Layer and Multi-Region Networks (MLN/MRN) + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity non-php-behavior-flag { + base lsp-attributes-flags; + description + "Indicates that non-PHP (non-Penultimate Hop Popping) + behavior for the LSP is desired."; + reference + "RFC 6511: Non-Penultimate Hop Popping Behavior and + Out-of-Band Mapping for RSVP-TE Label Switched + Paths + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity oob-mapping-flag { + base lsp-attributes-flags; + description + "Indicates that signaling of the egress binding information + is out of band (e.g., via the Border Gateway Protocol + (BGP))."; + reference + "RFC 6511: Non-Penultimate Hop Popping Behavior and + Out-of-Band Mapping for RSVP-TE Label Switched + Paths + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity entropy-label-capability { + base lsp-attributes-flags; + description + "Indicates entropy label capability."; + reference + "RFC 6790: The Use of Entropy Labels in MPLS Forwarding + RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO)"; + } + + identity oam-mep-entity-desired { + base lsp-attributes-flags; + description + "OAM Maintenance Entity Group End Point (MEP) entities + desired."; + reference + "RFC 7260: GMPLS RSVP-TE Extensions for Operations, + Administration, and Maintenance (OAM) + Configuration"; + } + + identity oam-mip-entity-desired { + base lsp-attributes-flags; + description + "OAM Maintenance Entity Group Intermediate Points (MIP) + entities desired."; + reference + "RFC 7260: GMPLS RSVP-TE Extensions for Operations, + Administration, and Maintenance (OAM) + Configuration"; + } + + identity 
srlg-collection-desired { + base lsp-attributes-flags; + description + "Shared Risk Link Group (SRLG) collection desired."; + reference + "RFC 7570: Label Switched Path (LSP) Attribute in the + Explicit Route Object (ERO) + RFC 8001: RSVP-TE Extensions for Collecting Shared Risk + Link Group (SRLG) Information"; + } + + identity loopback-desired { + base lsp-attributes-flags; + description + "This flag indicates that a particular node on the LSP is + required to enter loopback mode. + + This can also be used to specify the loopback state of the + node."; + reference + "RFC 7571: GMPLS RSVP-TE Extensions for Lock Instruct and + Loopback"; + } + + identity p2mp-te-tree-eval-request { + base lsp-attributes-flags; + description + "P2MP-TE tree re-evaluation request."; + reference + "RFC 8149: RSVP Extensions for Reoptimization of Loosely + Routed Point-to-Multipoint Traffic Engineering + Label Switched Paths (LSPs)"; + } + + identity rtm-set-desired { + base lsp-attributes-flags; + description + "Residence Time Measurement (RTM) attribute flag requested."; + reference + "RFC 8169: Residence Time Measurement in MPLS Networks"; + } + + identity link-protection-type { + description + "Base identity for the link protection type."; + } + + identity link-protection-unprotected { + base link-protection-type; + description + "Unprotected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-extra-traffic { + base link-protection-type; + description + "Extra-Traffic protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-shared { + base link-protection-type; + description + "Shared protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-1-for-1 { + base link-protection-type; + description + "One-for-one (1:1) protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-1-plus-1 { + base link-protection-type; + description + "One-plus-one (1+1) protected link type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity link-protection-enhanced { + base link-protection-type; + description + "A compound link protection type derived from the underlay + TE tunnel protection configuration supporting the TE link."; + } + + identity association-type { + description + "Base identity for the tunnel association."; + } + + identity association-type-recovery { + base association-type; + description + "Association type for recovery, used to associate LSPs of the + same tunnel for recovery."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 6780: RSVP ASSOCIATION Object Extensions"; + } + + identity association-type-resource-sharing { + base association-type; + description + "Association type for resource sharing, used to enable + resource sharing during make-before-break."; + reference + "RFC 4873: GMPLS Segment Recovery + RFC 6780: RSVP ASSOCIATION Object Extensions"; + } + + identity 
association-type-double-sided-bidir { + base association-type; + description + "Association type for double-sided bidirectional LSPs, + used to associate two LSPs of two tunnels that are + independently configured on either endpoint."; + reference + "RFC 7551: RSVP-TE Extensions for Associated Bidirectional + Label Switched Paths (LSPs)"; + } + + identity association-type-single-sided-bidir { + base association-type; + description + "Association type for single-sided bidirectional LSPs, + used to associate two LSPs of two tunnels, where one + tunnel is configured on one side/endpoint and the other + tunnel is dynamically created on the other endpoint."; + reference + "RFC 6780: RSVP ASSOCIATION Object Extensions + RFC 7551: RSVP-TE Extensions for Associated Bidirectional + Label Switched Paths (LSPs)"; + } + + identity association-type-diversity { + base association-type; + description + "Association Type diversity used to associate LSPs whose + paths are to be diverse from each other."; + reference + "RFC 8800: Path Computation Element Communication Protocol + (PCEP) Extension for Label Switched Path (LSP) + Diversity Constraint Signaling"; + } + + identity objective-function-type { + description + "Base identity for path objective function types."; + } + + identity of-minimize-cost-path { + base objective-function-type; + description + "Objective function for minimizing path cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-load-path { + base objective-function-type; + description + "Objective function for minimizing the load on one or more + paths."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-maximize-residual-bandwidth { + base objective-function-type; + description + "Objective function for maximizing residual bandwidth."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-agg-bandwidth-consumption { + base objective-function-type; + status obsolete; + description + "Objective function for minimizing aggregate bandwidth + consumption. + + This identity has been obsoleted: the + 'svec-of-minimize-agg-bandwidth-consumption' identity SHOULD + be used instead."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-load-most-loaded-link { + base objective-function-type; + status obsolete; + description + "Objective function for minimizing the load on the link that + is carrying the highest load. + + This identity has been obsoleted: the + 'svec-of-minimize-load-most-loaded-link' identity SHOULD + be used instead."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity of-minimize-cost-path-set { + base objective-function-type; + status obsolete; + description + "Objective function for minimizing the cost on a path set. 
+ + This identity has been obsoleted: the + 'svec-of-minimize-cost-path-set' identity SHOULD + be used instead."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity path-computation-method { + description + "Base identity for supported path computation mechanisms."; + } + + identity path-locally-computed { + base path-computation-method; + description + "Indicates a constrained-path LSP in which the + path is computed by the local LER."; + reference + "RFC 9522: Overview and Principles of Internet Traffic + Engineering, Section 4.4"; + } + + identity path-externally-queried { + base path-computation-method; + description + "Constrained-path LSP in which the path is obtained by + querying an external source, such as a PCE server. + In the case that an LSP is defined to be externally queried, + it may also have associated explicit definitions (provided + to the external source to aid computation). + + The path that is returned by the external source may + require further local computation on the device."; + reference + "RFC 9522: Overview and Principles of Internet Traffic + Engineering + RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity path-explicitly-defined { + base path-computation-method; + description + "Constrained-path LSP in which the path is + explicitly specified as a collection of strict and/or loose + hops."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 9522: Overview and Principles of Internet Traffic + Engineering"; + } + + identity lsp-metric-type { + description + "Base identity for the LSP metric specification types."; + } + + identity lsp-metric-relative { + base lsp-metric-type; + description + "The metric specified for the LSPs to which this identity + refers is specified as a value relative to the IGP metric + cost to the LSP's tail end."; + reference + "RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity lsp-metric-absolute { + base lsp-metric-type; + description + "The metric specified for the LSPs to which this identity + refers is specified as an absolute value."; + reference + "RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity lsp-metric-inherited { + base lsp-metric-type; + description + "The metric for the LSPs to which this identity refers is + not specified explicitly; rather, it is directly inherited + from the IGP cost."; + reference + "RFC 4657: Path Computation Element (PCE) Communication + Protocol Generic Requirements"; + } + + identity te-tunnel-type { + description + "Base identity from which specific tunnel types are derived."; + } + + identity te-tunnel-p2p { + base te-tunnel-type; + description + "TE Point-to-Point (P2P) tunnel type."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + + identity te-tunnel-p2mp { + base te-tunnel-type; + description + "TE P2MP tunnel type."; + reference + "RFC 4875: Extensions to Resource Reservation Protocol - + Traffic Engineering (RSVP-TE) for + Point-to-Multipoint TE Label Switched Paths + (LSPs)"; + } + + identity tunnel-action-type { + description + "Base identity from which specific tunnel action types + are derived."; + } + + identity tunnel-action-resetup { + base tunnel-action-type; + description + "TE tunnel action that tears down the tunnel's current LSP + (if any) and attempts to re-establish a 
new LSP."; + } + + identity tunnel-action-reoptimize { + base tunnel-action-type; + description + "TE tunnel action that reoptimizes the placement of the + tunnel LSP(s)."; + } + + identity tunnel-action-switchpath { + base tunnel-action-type; + description + "TE tunnel action that switches the tunnel's LSP to use the + specified path."; + } + + identity te-action-result { + description + "Base identity from which specific TE action results + are derived."; + } + + identity te-action-success { + base te-action-result; + description + "TE action was successful."; + } + + identity te-action-fail { + base te-action-result; + description + "TE action failed."; + } + + identity tunnel-action-inprogress { + base te-action-result; + description + "TE action is in progress."; + } + + identity tunnel-admin-state-type { + description + "Base identity for TE tunnel administrative states."; + } + + identity tunnel-admin-state-up { + base tunnel-admin-state-type; + description + "Tunnel's administrative state is up."; + } + + identity tunnel-admin-state-down { + base tunnel-admin-state-type; + description + "Tunnel's administrative state is down."; + } + + identity tunnel-admin-state-auto { + base tunnel-admin-state-type; + description + "Tunnel administrative auto state. The administrative status + in state datastore transitions to 'tunnel-admin-up' when the + tunnel is used by the client layer, and to 'tunnel-admin-down' + when it is not used by the client layer."; + } + + identity tunnel-state-type { + description + "Base identity for TE tunnel states."; + } + + identity tunnel-state-up { + base tunnel-state-type; + description + "Tunnel's state is up."; + } + + identity tunnel-state-down { + base tunnel-state-type; + description + "Tunnel's state is down."; + } + + identity lsp-state-type { + description + "Base identity for TE LSP states."; + } + + identity lsp-path-computing { + base lsp-state-type; + description + "State path computation is in progress."; + } + + identity lsp-path-computation-ok { + base lsp-state-type; + description + "State path computation was successful."; + } + + identity lsp-path-computation-failed { + base lsp-state-type; + description + "State path computation failed."; + } + + identity lsp-state-setting-up { + base lsp-state-type; + description + "State is being set up."; + } + + identity lsp-state-setup-ok { + base lsp-state-type; + description + "State setup was successful."; + } + + identity lsp-state-setup-failed { + base lsp-state-type; + description + "State setup failed."; + } + + identity lsp-state-up { + base lsp-state-type; + description + "State is up."; + } + + identity lsp-state-tearing-down { + base lsp-state-type; + description + "State is being torn down."; + } + + identity lsp-state-down { + base lsp-state-type; + description + "State is down."; + } + + identity path-invalidation-action-type { + description + "Base identity for TE path invalidation action types."; + } + + identity path-invalidation-action-drop { + base path-invalidation-action-type; + description + "Upon invalidation of the TE tunnel path, the tunnel remains + valid, but any packet mapped over the tunnel is dropped."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + } + + identity path-invalidation-action-teardown { + base path-invalidation-action-type; + description + "TE path invalidation action teardown."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + } + + identity lsp-restoration-type { + description + 
"Base identity from which LSP restoration types are derived."; + } + + identity lsp-restoration-restore-none { + base lsp-restoration-type; + description + "No LSP affected by a failure is restored."; + } + + identity lsp-restoration-restore-any { + base lsp-restoration-type; + description + "Any LSP affected by a failure is restored."; + } + + identity lsp-restoration-restore-all { + base lsp-restoration-type; + description + "Affected LSPs are restored after all LSPs of the tunnel are + broken."; + } + + identity restoration-scheme-type { + description + "Base identity for LSP restoration schemes."; + } + + identity restoration-scheme-rerouting { + base restoration-scheme-type; + description + "Restoration LSP is computed, signalled and configured after + the failure detection. + + This restoration scheme is also known as + 'Full LSP Re-routing', with the alternate route being + computed after the failure occurs."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity restoration-scheme-preconfigured { + base restoration-scheme-type; + description + "Restoration LSP is precomputed, presignalled and + preconfigured prior to the failure."; + } + + identity restoration-scheme-precomputed { + base restoration-scheme-type; + description + "Restoration LSP is precomputed, but not presignalled nor + preconfigured, prior to the failure. + + This restoration scheme is also known as + 'Full LSP Re-routing', with the alternate route being + pre-computed and stored for use when the failure occurs."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity restoration-scheme-presignaled { + base restoration-scheme-type; + description + "Restoration LSP is presignaled, but not preconfigured, + prior to the failure. + + This restoration scheme is also known as + 'Pre-planned LSP Re-routing'."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-type { + description + "Base identity from which LSP protection types are derived."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-unprotected { + base lsp-protection-type; + description + "'Unprotected' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-reroute-extra { + base lsp-protection-type; + status obsolete; + description + "'(Full) Rerouting' LSP protection type. + + This identity has been obsoleted: the + 'restoration-scheme-rerouting' identity SHOULD be used + instead."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-reroute { + base lsp-protection-type; + status obsolete; + description + "'Rerouting without Extra-Traffic' LSP protection type. 
+ + This identity has been obsoleted: the + 'restoration-scheme-rerouting' identity SHOULD be used + instead."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-1-for-n { + base lsp-protection-type; + description + "'1:N Protection with Extra-Traffic' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-1-for-1 { + base lsp-protection-type; + description + "LSP protection '1:1 Protection Type'."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-unidir-1-plus-1 { + base lsp-protection-type; + description + "'1+1 Unidirectional Protection' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-bidir-1-plus-1 { + base lsp-protection-type; + description + "'1+1 Bidirectional Protection' LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-extra-traffic { + base lsp-protection-type; + description + "Extra-Traffic LSP protection type."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + identity lsp-protection-state { + description + "Base identity of protection states for reporting purposes."; + } + + identity normal { + base lsp-protection-state; + description + "Normal state."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity signal-fail-of-protection { + base lsp-protection-state; + description + "The protection transport entity has a signal fail condition + that is of higher priority than the forced switchover + command."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity lockout-of-protection { + base lsp-protection-state; + description + "A Loss of Protection (LoP) command is active."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity forced-switch { + base lsp-protection-state; + description + "A forced switchover command is active."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity signal-fail { + base lsp-protection-state; + description + "There is a signal fail condition on either the working path + or the protection path."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity signal-degrade { + base lsp-protection-state; + description + "There is a 
signal degrade condition on either the working + path or the protection path."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity manual-switch { + base lsp-protection-state; + description + "A manual switchover command is active."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity wait-to-restore { + base lsp-protection-state; + description + "A Wait-to-Restore (WTR) timer is running."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity do-not-revert { + base lsp-protection-state; + description + "A Do Not Revert (DNR) condition is active because of + non-revertive behavior."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity failure-of-protocol { + base lsp-protection-state; + description + "LSP protection is not working because of a protocol failure + condition."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity protection-external-commands { + description + "Base identity from which protection-related external commands + used for troubleshooting purposes are derived."; + } + + identity action-freeze { + base protection-external-commands; + description + "A temporary configuration action initiated by an operator + command that prevents any switchover action from being taken + and, as such, freezes the current state."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity clear-freeze { + base protection-external-commands; + description + "An action that clears the active freeze state."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-lockout-of-normal { + base protection-external-commands; + description + "A temporary configuration action initiated by an operator + command to ensure that the normal traffic is not allowed + to use the protection transport entity."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + 
(GMPLS)"; + } + + identity clear-lockout-of-normal { + base protection-external-commands; + description + "An action that clears the active lockout of the + normal state."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-lockout-of-protection { + base protection-external-commands; + description + "A temporary configuration action initiated by an operator + command to ensure that the protection transport entity is + temporarily not available to transport a traffic signal + (either normal or Extra-Traffic)."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-forced-switch { + base protection-external-commands; + description + "A switchover action initiated by an operator command to + switch the Extra-Traffic signal, the normal traffic signal, + or the null signal to the protection transport entity, + unless a switchover command of equal or higher priority is + in effect."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-manual-switch { + base protection-external-commands; + description + "A switchover action initiated by an operator command to + switch the Extra-Traffic signal, the normal traffic signal, + or the null signal to the protection transport entity, + unless a fault condition exists on other transport entities + or a switchover command of equal or higher priority is in + effect."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity action-exercise { + base protection-external-commands; + description + "An action that starts testing whether or not Automatic + Protection Switching (APS) communication is operating + correctly. 
+ + It is of lower priority than any other state or command."; + reference + "RFC 7271: MPLS Transport Profile (MPLS-TP) Linear Protection + to Match the Operational Expectations of + Synchronous Digital Hierarchy, Optical Transport + Network, and Ethernet Transport Network Operators + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity clear { + base protection-external-commands; + description + "An action that clears the active near-end lockout of a + protection, forced switchover, manual switchover, + Wait-to-Restore (WTR) state, or exercise command."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + identity switching-capabilities { + description + "Base identity for interface switching capabilities."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-psc1 { + base switching-capabilities; + description + "Packet-Switch Capable-1 (PSC-1)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-evpl { + base switching-capabilities; + description + "Ethernet Virtual Private Line (EVPL)."; + reference + "RFC 6004: Generalized MPLS (GMPLS) Support for Metro + Ethernet Forum and G.8011 Ethernet Service + Switching"; + } + + identity switching-l2sc { + base switching-capabilities; + description + "Layer-2 Switch Capable (L2SC)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-tdm { + base switching-capabilities; + description + "Time-Division-Multiplex Capable (TDM)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-otn { + base switching-capabilities; + description + "OTN-TDM capable."; + reference + "RFC 7138: Traffic Engineering Extensions to OSPF for GMPLS + Control of Evolving G.709 Optical Transport + Networks"; + } + + identity switching-dcsc { + base switching-capabilities; + description + "Data Channel Switching Capable (DCSC)."; + reference + "RFC 6002: Generalized MPLS (GMPLS) Data Channel + Switching Capable (DCSC) and Channel Set Label + Extensions"; + } + + identity switching-lsc { + base switching-capabilities; + description + "Lambda-Switch Capable (LSC)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity switching-fsc { + base switching-capabilities; + description + "Fiber-Switch Capable (FSC)."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-types { + description + "Base identity for encoding types."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-packet { + base lsp-encoding-types; + description + "Packet LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-ethernet { + base lsp-encoding-types; + description + "Ethernet LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching 
(GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-pdh { + base lsp-encoding-types; + description + "ANSI/ETSI PDH LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-sdh { + base lsp-encoding-types; + description + "SDH ITU-T G.707 / SONET ANSI T1.105 LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-digital-wrapper { + base lsp-encoding-types; + description + "Digital Wrapper LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-lambda { + base lsp-encoding-types; + description + "Lambda (photonic) LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-fiber { + base lsp-encoding-types; + description + "Fiber LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-fiber-channel { + base lsp-encoding-types; + description + "FiberChannel LSP encoding."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description"; + } + + identity lsp-encoding-oduk { + base lsp-encoding-types; + description + "G.709 ODUk (Digital Path) LSP encoding."; + reference + "RFC 4328: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Extensions for G.709 Optical Transport + Networks Control"; + } + + identity lsp-encoding-optical-channel { + base lsp-encoding-types; + description + "G.709 Optical Channel LSP encoding."; + reference + "RFC 4328: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Extensions for G.709 Optical Transport + Networks Control"; + } + + identity lsp-encoding-line { + base lsp-encoding-types; + description + "Line (e.g., 8B/10B) LSP encoding."; + reference + "RFC 6004: Generalized MPLS (GMPLS) Support for Metro + Ethernet Forum and G.8011 Ethernet Service + Switching"; + } + + identity path-signaling-type { + description + "Base identity from which specific LSP path setup types + are derived."; + } + + identity path-setup-static { + base path-signaling-type; + description + "Static LSP provisioning path setup."; + } + + identity path-setup-rsvp { + base path-signaling-type; + description + "RSVP-TE signaling path setup."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + + identity path-setup-sr { + base path-signaling-type; + description + "Segment-routing path setup."; + } + + identity path-scope-type { + description + "Base identity from which specific path scope types are + derived."; + } + + identity path-scope-segment { + base path-scope-type; + description + "Path scope segment."; + reference + "RFC 4873: GMPLS Segment Recovery"; + } + + identity path-scope-end-to-end { + base path-scope-type; + description + "Path scope end to end."; + reference + "RFC 4873: GMPLS Segment Recovery"; + } + + identity route-usage-type { + description + "Base identity for route usage."; + } + + identity route-include-object { + base route-usage-type; + description + "'Include route' object."; + } + + identity route-exclude-object { + base route-usage-type; + description + "'Exclude route' object."; + reference + "RFC 4874: Exclude Routes - Extension to Resource 
ReserVation + Protocol-Traffic Engineering (RSVP-TE)"; + } + + identity route-exclude-srlg { + base route-usage-type; + description + "Excludes Shared Risk Link Groups (SRLGs)."; + reference + "RFC 4874: Exclude Routes - Extension to Resource ReserVation + Protocol-Traffic Engineering (RSVP-TE)"; + } + + identity path-metric-optimization-type { + description + "Base identity used to define the path metric optimization + types."; + } + + identity link-path-metric-type { + description + "Base identity used to define the link and the path metric + types. + + The unit of the path metric value is interpreted in the + context of the path metric type and the derived identities + SHOULD describe the unit of the path metric types they + define."; + } + + identity link-metric-type { + base link-path-metric-type; + description + "Base identity for the link metric types."; + } + + identity link-metric-te { + base link-metric-type; + description + "Traffic Engineering (TE) Link Metric."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2, Section 2.5.5 + RFC 5305: IS-IS Extensions for Traffic Engineering, + Section 3.7"; + } + + identity link-metric-igp { + base link-metric-type; + description + "Interior Gateway Protocol (IGP) Link Metric."; + reference + "RFC 3785: Use of Interior Gateway Protocol (IGP) Metric + as a second MPLS Traffic Engineering (TE) + Metric"; + } + + identity link-metric-delay-average { + base link-metric-type; + description + "Unidirectional Link Delay, measured in units of + microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.1 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.1"; + } + + identity link-metric-delay-minimum { + base link-metric-type; + description + "Minimum unidirectional Link Delay, measured in units of + microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.2 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.2"; + } + + identity link-metric-delay-maximum { + base link-metric-type; + description + "Maximum unidirectional Link Delay, measured in units of + microseconds."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.2 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.2"; + } + + identity link-metric-residual-bandwidth { + base link-metric-type; + description + "Unidirectional Residual Bandwidth, measured in units of + bytes per second. 
+ + It is defined to be Maximum Bandwidth minus the bandwidth + currently allocated to LSPs."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric + Extensions, Section 4.5 + RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 4.5"; + } + + identity path-metric-type { + base link-path-metric-type; + base path-metric-optimization-type; + description + "Base identity for the path metric types."; + } + + identity path-metric-te { + base path-metric-type; + description + "Traffic Engineering (TE) Path Metric."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), Section 7.8"; + } + + identity path-metric-igp { + base path-metric-type; + description + "Interior Gateway Protocol (IGP) Path Metric."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), section 7.8"; + } + + identity path-metric-hop { + base path-metric-type; + description + "Hop Count Path Metric."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), Section 7.8"; + } + + identity path-metric-delay-average { + base path-metric-type; + description + "The Path Delay Metric, measured in units of + microseconds."; + reference + "RFC 8233: Extensions to the Path Computation Element + Communication Protocol (PCEP) to Compute + Service-Aware Label Switched Paths (LSPs), + Section 3.1.1"; + } + + identity path-metric-delay-minimum { + base path-metric-type; + description + "The Path Min Delay Metric, measured in units of + microseconds."; + reference + "I-D.ietf-pce-sid-algo: Carrying SR-Algorithm information + in PCE-based Networks, + draft-ietf-pce-sid-algo-14, + Sections 3.5.1 and 3.5.2"; + } + + identity path-metric-residual-bandwidth { + base path-metric-type; + description + "The Path Residual Bandwidth, defined as the minimum Link + Residual Bandwidth all the links along the path. 
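A minimal sketch, assuming the per-link residual bandwidths of a path are already known in bytes per second, of how the Path Residual Bandwidth defined above reduces to a minimum taken over the links of the path; the function name and the input values are illustrative only and are not part of the module or the patch:

    def path_residual_bandwidth(link_residual_bw_bps):
        # Illustrative helper (not part of the YANG module): the Path
        # Residual Bandwidth is the minimum Link Residual Bandwidth over
        # all links along the path (path-metric-residual-bandwidth).
        return min(link_residual_bw_bps)

    # Example: a three-hop path whose links have 1.25e9, 6.0e8 and 9.0e8
    # bytes/s of residual bandwidth yields a path metric of 6.0e8 bytes/s.
    assert path_residual_bandwidth([1.25e9, 6.0e8, 9.0e8]) == 6.0e8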
+ + The Path Residual Bandwidth can be seen as the path + metric associated with the Maximum residual Bandwidth Path + (MBP) objective function."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity path-metric-optimize-includes { + base path-metric-optimization-type; + description + "A metric that optimizes the number of included resources + specified in a set."; + } + + identity path-metric-optimize-excludes { + base path-metric-optimization-type; + description + "A metric that optimizes to a maximum the number of excluded + resources specified in a set."; + } + + identity path-tiebreaker-type { + description + "Base identity for the path tiebreaker type."; + } + + identity path-tiebreaker-minfill { + base path-tiebreaker-type; + description + "Min-Fill LSP path placement: selects the path with the most + available bandwidth (load balance LSPs over more links)."; + } + + identity path-tiebreaker-maxfill { + base path-tiebreaker-type; + description + "Max-Fill LSP path placement: selects the path with the least + available bandwidth (packing more LSPs over few links)."; + } + + identity path-tiebreaker-random { + base path-tiebreaker-type; + description + "Random LSP path placement."; + } + + identity resource-affinities-type { + description + "Base identity for resource class affinities."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity resource-aff-include-all { + base resource-affinities-type; + description + "The set of attribute filters associated with a + tunnel, all of which must be present for a link + to be acceptable."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity resource-aff-include-any { + base resource-affinities-type; + description + "The set of attribute filters associated with a + tunnel, any of which must be present for a link + to be acceptable."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity resource-aff-exclude-any { + base resource-affinities-type; + description + "The set of attribute filters associated with a + tunnel, any of which renders a link unacceptable."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 2702: Requirements for Traffic Engineering Over MPLS"; + } + + identity te-optimization-criterion { + description + "Base identity for the TE optimization criteria."; + reference + "RFC 9522: Overview and Principles of Internet Traffic + Engineering"; + } + + identity not-optimized { + base te-optimization-criterion; + description + "Optimization is not applied."; + } + + identity cost { + base te-optimization-criterion; + description + "Optimized on cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity delay { + base te-optimization-criterion; + description + "Optimized on delay."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity path-computation-srlg-type { + description + "Base identity for Shared Risk Link Group (SRLG) path + computation."; + } + + identity srlg-ignore { + base path-computation-srlg-type; + description + "Ignores Shared Risk Link Groups 
(SRLGs) in the path + computation."; + } + + identity srlg-strict { + base path-computation-srlg-type; + description + "Includes a strict Shared Risk Link Group (SRLG) check in + the path computation."; + } + + identity srlg-preferred { + base path-computation-srlg-type; + description + "Includes a preferred Shared Risk Link Group (SRLG) check in + the path computation."; + } + + identity srlg-weighted { + base path-computation-srlg-type; + description + "Includes a weighted Shared Risk Link Group (SRLG) check in + the path computation."; + } + + identity path-computation-error-reason { + description + "Base identity for path computation error reasons."; + } + + identity path-computation-error-path-not-found { + base path-computation-error-reason; + description + "Path computation has failed because of an unspecified + reason."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP), Section 7.5"; + } + + identity path-computation-error-no-topology { + base path-computation-error-reason; + description + "Path computation has failed because there is no topology + with the provided topology-identifier."; + } + + identity path-computation-error-no-dependent-server { + base path-computation-error-reason; + description + "Path computation has failed because one or more dependent + path computation servers are unavailable. + + The dependent path computation server could be + a Backward-Recursive Path Computation (BRPC) downstream + PCE or a child PCE."; + reference + "RFC 5441: A Backward-Recursive PCE-Based Computation (BRPC) + Procedure to Compute Shortest Constrained + Inter-Domain Traffic Engineering Label Switched + Paths + RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture"; + } + + identity path-computation-error-pce-unavailable { + base path-computation-error-reason; + description + "Path computation has failed because PCE is not available. + + It corresponds to bit 31 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP) + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-inclusion-hop { + base path-computation-error-reason; + description + "Path computation has failed because there is no + node or link provided by one or more inclusion hops."; + } + + identity path-computation-error-destination-unknown-in-domain { + base path-computation-error-reason; + description + "Path computation has failed because the destination node is + unknown in indicated destination domain. + + It corresponds to bit 19 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-resource { + base path-computation-error-reason; + description + "Path computation has failed because there is no + available resource in one or more domains. 
+ + It corresponds to bit 20 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-child-pce-unresponsive { + base path-computation-error-no-dependent-server; + description + "Path computation has failed because child PCE is not + responsive. + + It corresponds to bit 21 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-destination-domain-unknown { + base path-computation-error-reason; + description + "Path computation has failed because the destination domain + was unknown. + + It corresponds to bit 22 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-p2mp { + base path-computation-error-reason; + description + "Path computation has failed because of P2MP reachability + problem. + + It corresponds to bit 24 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 8306: Extensions to the Path Computation Element + Communication Protocol (PCEP) for + Point-to-Multipoint Traffic Engineering Label + Switched Paths + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-gco-migration { + base path-computation-error-reason; + description + "Path computation has failed because of no Global Concurrent + Optimization (GCO) migration path found. + + It corresponds to bit 26 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5557: Path Computation Element Communication Protocol + (PCEP) Requirements and Protocol Extensions in + Support of Global Concurrent Optimization + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-no-gco-solution { + base path-computation-error-reason; + description + "Path computation has failed because of no GCO solution + found. + + It corresponds to bit 25 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5557: Path Computation Element Communication Protocol + (PCEP) Requirements and Protocol Extensions in + Support of Global Concurrent Optimization + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-pks-expansion { + base path-computation-error-reason; + description + "Path computation has failed because of Path-Key Subobject + (PKS) expansion failure. 
+ + It corresponds to bit 27 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5520: Preserving Topology Confidentiality in + Inter-Domain Path Computation Using a + Path-Key-Based Mechanism + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-brpc-chain-unavailable { + base path-computation-error-no-dependent-server; + description + "Path computation has failed because PCE BRPC chain + unavailable. + + It corresponds to bit 28 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5441: A Backward-Recursive PCE-Based Computation (BRPC) + Procedure to Compute Shortest Constrained + Inter-Domain Traffic Engineering Label Switched + Paths + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-source-unknown { + base path-computation-error-reason; + description + "Path computation has failed because source node is + unknown. + + It corresponds to bit 29 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP); + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity path-computation-error-destination-unknown { + base path-computation-error-reason; + description + "Path computation has failed because destination node is + unknown. + + It corresponds to bit 30 of the Flags field of the + NO-PATH-VECTOR TLV."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP); + + https://www.iana.org/assignments/pcep + /pcep.xhtml#no-path-vector-tlv"; + } + + identity protocol-origin-type { + description + "Base identity for protocol origin type."; + } + + identity protocol-origin-api { + base protocol-origin-type; + description + "Protocol origin is via Application Programming Interface + (API)."; + } + + identity protocol-origin-pcep { + base protocol-origin-type; + description + "Protocol origin is Path Computation Engine Protocol + (PCEP)."; + reference + "RFC 5440: Path Computation Element (PCE) Communication + Protocol (PCEP)"; + } + + identity protocol-origin-bgp { + base protocol-origin-type; + description + "Protocol origin is Border Gateway Protocol (BGP)."; + reference + "RFC 9012: The BGP Tunnel Encapsulation Attribute"; + } + + identity svec-objective-function-type { + description + "Base identity for SVEC objective function type."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol (PCEP)"; + } + + identity svec-of-minimize-agg-bandwidth-consumption { + base svec-objective-function-type; + description + "Objective function for minimizing aggregate bandwidth + consumption (MBC)."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-of-minimize-load-most-loaded-link { + base svec-objective-function-type; + description + "Objective function for minimizing the load on the link that + is carrying the highest load (MLL)."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-of-minimize-cost-path-set { + base svec-objective-function-type; + description + "Objective function for minimizing the cost on a path set + (MCC)."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + 
} + + identity svec-of-minimize-common-transit-domain { + base svec-objective-function-type; + description + "Objective function for minimizing the number of common + transit domains (MCTD)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-of-minimize-shared-link { + base svec-objective-function-type; + description + "Objective function for minimizing the number of shared + links (MSL)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-of-minimize-shared-srlg { + base svec-objective-function-type; + description + "Objective function for minimizing the number of shared + Shared Risk Link Groups (SRLG) (MSS)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-of-minimize-shared-nodes { + base svec-objective-function-type; + description + "Objective function for minimizing the number of shared + nodes (MSN)."; + reference + "RFC 8685: Path Computation Element Communication Protocol + (PCEP) Extensions for the Hierarchical Path + Computation Element (H-PCE) Architecture."; + } + + identity svec-metric-type { + description + "Base identity for SVEC metric type."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol (PCEP)"; + } + + identity svec-metric-cumulative-te { + base svec-metric-type; + description + "Cumulative TE cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-cumulative-igp { + base svec-metric-type; + description + "Cumulative IGP cost."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-cumulative-hop { + base svec-metric-type; + description + "Cumulative Hop path metric."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-aggregate-bandwidth-consumption { + base svec-metric-type; + description + "Aggregate bandwidth consumption."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + identity svec-metric-load-of-the-most-loaded-link { + base svec-metric-type; + description + "Load of the most loaded link."; + reference + "RFC 5541: Encoding of Objective Functions in the Path + Computation Element Communication Protocol + (PCEP)"; + } + + /* + * Typedefs + */ + + typedef admin-group { + type yang:hex-string { + /* 01:02:03:04 */ + length "1..11"; + } + description + "Administrative group / resource class / color representation + in 'hex-string' type. + + The most significant byte in the hex-string is the farthest + to the left in the byte sequence. 
+ + Leading zero bytes in the configured value may be omitted + for brevity."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering + RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + + typedef admin-groups { + type union { + type admin-group; + type extended-admin-group; + } + description + "Derived types for TE administrative groups."; + } + + typedef extended-admin-group { + type yang:hex-string; + description + "Extended administrative group / resource class / color + representation in 'hex-string' type. + + The most significant byte in the hex-string is the farthest + to the left in the byte sequence. + + Leading zero bytes in the configured value may be omitted + for brevity."; + reference + "RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + + typedef path-attribute-flags { + type union { + type identityref { + base session-attributes-flags; + } + type identityref { + base lsp-attributes-flags; + } + } + description + "Path attributes flags type."; + } + + typedef performance-metrics-normality { + type enumeration { + enum unknown { + value 0; + description + "Unknown."; + } + enum normal { + value 1; + description + "Normal. + + Indicates that the anomalous bit is not set."; + } + enum abnormal { + value 2; + description + "Abnormal. + + Indicates that the anomalous bit is set."; + } + } + description + "Indicates whether a performance metric is normal (anomalous + bit not set), abnormal (anomalous bit set), or unknown."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions"; + } + + typedef srlg { + type uint32; + description + "Shared Risk Link Group (SRLG) type."; + reference + "RFC 4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS) + RFC 5307: IS-IS Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS)"; + } + + typedef te-common-status { + type enumeration { + enum up { + description + "Enabled."; + } + enum down { + description + "Disabled."; + } + enum testing { + description + "In some test mode."; + } + enum preparing-maintenance { + description + "The resource is disabled in the control plane to prepare + for a graceful shutdown for maintenance purposes."; + reference + "RFC 5817: Graceful Shutdown in MPLS and Generalized MPLS + Traffic Engineering Networks"; + } + enum maintenance { + description + "The resource is disabled in the data plane for maintenance + purposes."; + } + enum unknown { + description + "Status is unknown."; + } + } + description + "Defines a type representing the common states of a TE + resource."; + } + + typedef te-bandwidth { + type string { + pattern '0[xX](0((\.0?)?[pP](\+)?0?|(\.0?))|' + + '1(\.([\da-fA-F]{0,5}[02468aAcCeE]?)?)?' + + '[pP](\+)?(12[0-7]|' + + '1[01]\d|0?\d?\d)?)|0[xX][\da-fA-F]{1,8}|\d+' + + '(,(0[xX](0((\.0?)?[pP](\+)?0?|(\.0?))|' + + '1(\.([\da-fA-F]{0,5}[02468aAcCeE]?)?)?' + + '[pP](\+)?(12[0-7]|' + + '1[01]\d|0?\d?\d)?)|0[xX][\da-fA-F]{1,8}|\d+))*'; + } + description + "This is the generic bandwidth type. 
+ + It is a string containing a list of numbers separated by + commas, where each of these numbers can be non-negative + decimal, hex integer, or hex float: + + (dec | hex | float)[*(','(dec | hex | float))] + + For the packet-switching type, the string encoding may follow + the type 'bandwidth-ieee-float32' as defined in RFC 8294 + (e.g., 0x1p10), where the units are in bytes per second. + + Canonically, the string is represented as all lowercase and in + hex, where the prefix '0x' precedes the hex number."; + reference + "RFC 8294: Common YANG Data Types for the Routing Area + ITU-T G.709: Interfaces for the optical transport network - + Edition 6.0 (06/2020)"; + } + + typedef te-ds-class { + type uint8 { + range "0..7"; + } + description + "The Differentiated Services Class-Type of traffic."; + reference + "RFC 4124: Protocol Extensions for Support of Diffserv-aware + MPLS Traffic Engineering, Section 4.3.1"; + } + + typedef te-global-id { + type uint32; + description + "An identifier to uniquely identify an operator, which can be + either a provider or a client. + + The definition of this type is taken from RFCs 6370 and 5003. + + This attribute type is used solely to provide a globally + unique context for TE topologies."; + reference + "RFC 5003: Attachment Individual Identifier (AII) Types for + Aggregation + RFC 6370: MPLS Transport Profile (MPLS-TP) Identifiers"; + } + + typedef te-hop-type { + type enumeration { + enum loose { + description + "A loose hop in an explicit path."; + } + enum strict { + description + "A strict hop in an explicit path."; + } + } + description + "Enumerated type for specifying loose or strict paths."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3.3"; + } + + typedef te-link-access-type { + type enumeration { + enum point-to-point { + description + "The link is point-to-point."; + } + enum multi-access { + description + "The link is multi-access, including broadcast and NBMA."; + } + } + description + "Defines a type representing the access type of a TE link."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + + typedef te-label-direction { + type enumeration { + enum forward { + description + "Label allocated for the forward LSP direction."; + } + enum reverse { + description + "Label allocated for the reverse LSP direction."; + } + } + description + "Enumerated type for specifying the forward or reverse + label."; + } + + typedef te-link-direction { + type enumeration { + enum incoming { + description + "The explicit route represents an incoming link on + a node."; + } + enum outgoing { + description + "The explicit route represents an outgoing link on + a node."; + } + } + description + "Enumerated type for specifying the direction of a link on + a node."; + } + + typedef te-metric { + type uint32; + description + "Traffic Engineering (TE) metric."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2, Section 2.5.5 + RFC 5305: IS-IS Extensions for Traffic Engineering, + Section 3.7"; + } + + typedef te-node-id { + type union { + type yang:dotted-quad; + type inet:ipv6-address-no-zone; + } + description + "A type representing the identifier for a node in a TE + topology. + + The identifier is represented either as 4 octets in + dotted-quad notation, or as 16 octets in full, mixed, + shortened, or shortened-mixed IPv6 address notation. 
+ + This attribute MAY be mapped to the Router Address TLV + described in Section 2.4.1 of RFC 3630, the TE Router ID + described in Section 3 of RFC 6827, the Traffic Engineering + Router ID TLV described in Section 4.3 of RFC 5305, the TE + Router ID TLV described in Section 3.2.1 of RFC 6119, or the + IPv6 TE Router ID TLV described in Section 4.1 of RFC 6119. + + The reachability of such a TE node MAY be achieved by a + mechanism such as that described in Section 6.2 of RFC 6827."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2, Section 2.4.1 + RFC 5305: IS-IS Extensions for Traffic Engineering, + Section 4.3 + RFC 6119: IPv6 Traffic Engineering in IS-IS, Section 3.2.1 + RFC 6827: Automatically Switched Optical Network (ASON) + Routing for OSPFv2 Protocols, Section 3"; + } + + typedef te-oper-status { + type te-common-status; + description + "Defines a type representing the operational status of + a TE resource."; + } + + typedef te-admin-status { + type te-common-status; + description + "Defines a type representing the administrative status of + a TE resource."; + } + + typedef te-path-disjointness { + type bits { + bit node { + position 0; + description + "Node disjoint."; + } + bit link { + position 1; + description + "Link disjoint."; + } + bit srlg { + position 2; + description + "Shared Risk Link Group (SRLG) disjoint."; + } + } + description + "Type of the resource disjointness for a TE tunnel path."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + + typedef te-recovery-status { + type enumeration { + enum normal { + description + "Both the recovery span and the working span are fully + allocated and active, data traffic is being + transported over (or selected from) the working + span, and no trigger events are reported."; + } + enum recovery-started { + description + "The recovery action has been started but not completed."; + } + enum recovery-succeeded { + description + "The recovery action has succeeded. 
+ + The working span has reported a failure/degrade condition, + and the user traffic is being transported (or selected) + on the recovery span."; + } + enum recovery-failed { + description + "The recovery action has failed."; + } + enum reversion-started { + description + "The reversion has started."; + } + enum reversion-succeeded { + description + "The reversion action has succeeded."; + } + enum reversion-failed { + description + "The reversion has failed."; + } + enum recovery-unavailable { + description + "The recovery is unavailable, as a result of either an + operator's lockout command or a failure condition + detected on the recovery span."; + } + enum recovery-admin { + description + "The operator has issued a command to switch the user + traffic to the recovery span."; + } + enum wait-to-restore { + description + "The recovery domain is recovering from a failure/degrade + condition on the working span that is being controlled by + the Wait-to-Restore (WTR) timer."; + } + } + description + "Defines the status of a recovery action."; + reference + "RFC 6378: MPLS Transport Profile (MPLS-TP) Linear Protection + RFC 4427: Recovery (Protection and Restoration) Terminology + for Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + + typedef te-template-name { + type string { + pattern '/?([a-zA-Z0-9\-_.]+)(/[a-zA-Z0-9\-_.]+)*'; + } + description + "A type for the name of a TE node template or TE link + template."; + } + + typedef te-topology-event-type { + type enumeration { + enum add { + value 0; + description + "A TE node or TE link has been added."; + } + enum remove { + value 1; + description + "A TE node or TE link has been removed."; + } + enum update { + value 2; + description + "A TE node or TE link has been updated."; + } + } + description + "TE event type for notifications."; + } + + typedef te-topology-id { + type union { + type string { + length "0"; + // empty string + } + type string { + pattern '([a-zA-Z0-9\-_.]+:)*' + + '/?([a-zA-Z0-9\-_.]+)(/[a-zA-Z0-9\-_.]+)*'; + } + } + description + "An identifier for a topology. + + It is optional to have one or more prefixes at the beginning, + separated by colons. + + The prefixes can be 'network-types' as defined in the + 'ietf-network' module in RFC 8345, to help the user better + understand the topology before further inquiry is made."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef te-tp-id { + type union { + type uint32; + // Unnumbered + type inet:ip-address; + // IPv4 or IPv6 address + } + description + "An identifier for a TE link endpoint on a node. 
+ + This attribute is mapped to a local or remote link identifier + as defined in RFCs 3630 and 5305."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + + typedef path-type { + type enumeration { + enum primary-path { + description + "Indicates that the TE path is a primary path."; + } + enum secondary-path { + description + "Indicates that the TE path is a secondary path."; + } + enum primary-reverse-path { + description + "Indicates that the TE path is a primary reverse path."; + } + enum secondary-reverse-path { + description + "Indicates that the TE path is a secondary reverse path."; + } + } + description + "The type of TE path, indicating whether a path is a primary, + or a reverse primary, or a secondary, or a reverse secondary + path."; + } + + /* + * TE bandwidth groupings + */ + + grouping te-bandwidth { + description + "This grouping defines the generic TE bandwidth. + + For some known data-plane technologies, specific modeling + structures are specified. + + The string-encoded 'te-bandwidth' type is used for + unspecified technologies. + + The modeling structure can be augmented later for other + technologies."; + container te-bandwidth { + description + "Container that specifies TE bandwidth. + + The choices can be augmented for specific data-plane + technologies."; + choice technology { + default "generic"; + description + "Data-plane technology type."; + case generic { + leaf generic { + type te-bandwidth; + description + "Bandwidth specified in a generic format."; + } + } + } + } + } + + /* + * TE label groupings + */ + + grouping te-label { + description + "This grouping defines the generic TE label. + + The modeling structure can be augmented for each technology. + + For unspecified technologies, 'rt-types:generalized-label' + is used."; + container te-label { + description + "Container that specifies the TE label. + + The choices can be augmented for specific data-plane + technologies."; + choice technology { + default "generic"; + description + "Data-plane technology type."; + case generic { + leaf generic { + type rt-types:generalized-label; + description + "TE label specified in a generic format."; + } + } + } + leaf direction { + type te-label-direction; + default "forward"; + description + "Label direction."; + } + } + } + + grouping te-topology-identifier { + description + "Augmentation for a TE topology."; + container te-topology-identifier { + description + "TE topology identifier container."; + leaf provider-id { + type te-global-id; + default "0"; + description + "An identifier to uniquely identify a provider. + If omitted, it assumes that the topology provider ID + value = 0 (the default)."; + } + leaf client-id { + type te-global-id; + default "0"; + description + "An identifier to uniquely identify a client. + If omitted, it assumes that the topology client ID + value = 0 (the default)."; + } + leaf topology-id { + type te-topology-id; + default ""; + description + "When the datastore contains several topologies, + 'topology-id' distinguishes between them. + + If omitted, the default (empty) string for this leaf is + assumed."; + } + } + } + + /* + * TE performance metrics groupings + */ + + grouping performance-metrics-one-way-delay-loss { + description + "Performance Metrics (PM) information in real time that can + be applicable to links or connections. 
+ + PM defined in this grouping are applicable to generic TE PM + as well as packet TE PM."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf one-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "One-way delay or latency."; + } + leaf one-way-delay-normality { + type te-types:performance-metrics-normality; + description + "One-way delay normality."; + } + } + + grouping performance-metrics-two-way-delay-loss { + description + "Performance Metrics (PM) information in real time that can be + applicable to links or connections. + + PM defined in this grouping are applicable to generic TE PM + as well as packet TE PM."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf two-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + description + "Two-way delay or latency."; + } + leaf two-way-delay-normality { + type te-types:performance-metrics-normality; + description + "Two-way delay normality."; + } + } + + grouping performance-metrics-one-way-bandwidth { + description + "Performance Metrics (PM) information in real time that can be + applicable to links. + + PM defined in this grouping are applicable to generic TE PM + as well as packet TE PM."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf one-way-residual-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Residual bandwidth that subtracts tunnel reservations from + Maximum Bandwidth (or link capacity) (RFC 3630) and + provides an aggregated remainder across QoS classes."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + leaf one-way-residual-bandwidth-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Residual bandwidth normality."; + } + leaf one-way-available-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Available bandwidth that is defined to be residual + bandwidth minus the measured bandwidth used for the + actual forwarding of non-RSVP-TE LSP packets. + + For a bundled link, available bandwidth is defined to be + the sum of the component link available bandwidths."; + } + leaf one-way-available-bandwidth-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Available bandwidth normality."; + } + leaf one-way-utilized-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Bandwidth utilization that represents the actual + utilization of the link (i.e., as measured in the router). 
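A minimal sketch of how the one-way bandwidth metrics described above relate to each other, assuming the link capacity (Maximum Bandwidth), the bandwidth reserved for RSVP-TE LSPs, and the measured non-RSVP-TE forwarding usage are known, all in bytes per second; the names and figures are illustrative only:

    def one_way_bandwidth_metrics(max_bw, reserved_bw, non_rsvp_te_usage):
        # Illustrative helper (not part of the YANG module):
        # residual  = Maximum Bandwidth minus bandwidth reserved for LSPs;
        # available = residual minus measured non-RSVP-TE forwarding usage.
        residual = max_bw - reserved_bw
        available = residual - non_rsvp_te_usage
        return residual, available

    # Example: a 1.25e9 B/s link with 2.5e8 B/s reserved and 1.0e8 B/s of
    # measured non-RSVP-TE traffic.
    residual, available = one_way_bandwidth_metrics(1.25e9, 2.5e8, 1.0e8)
    assert residual == 1.0e9 and available == 9.0e8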
+ For a bundled link, bandwidth utilization is defined to + be the sum of the component link bandwidth utilizations."; + } + leaf one-way-utilized-bandwidth-normality { + type te-types:performance-metrics-normality; + default "normal"; + description + "Bandwidth utilization normality."; + } + } + + grouping one-way-performance-metrics { + description + "One-way Performance Metrics (PM) throttle grouping."; + leaf one-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "One-way delay or latency."; + } + leaf one-way-residual-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Residual bandwidth that subtracts tunnel reservations from + Maximum Bandwidth (or link capacity) (RFC 3630) and + provides an aggregated remainder across QoS classes."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + leaf one-way-available-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Available bandwidth that is defined to be residual + bandwidth minus the measured bandwidth used for the + actual forwarding of non-RSVP-TE LSP packets. + + For a bundled link, available bandwidth is defined to be + the sum of the component link available bandwidths."; + } + leaf one-way-utilized-bandwidth { + type rt-types:bandwidth-ieee-float32; + units "bytes per second"; + default "0x0p0"; + description + "Bandwidth utilization that represents the actual + utilization of the link (i.e., as measured in the router). + For a bundled link, bandwidth utilization is defined to + be the sum of the component link bandwidth utilizations."; + } + } + + grouping two-way-performance-metrics { + description + "Two-way Performance Metrics (PM) throttle grouping."; + leaf two-way-delay { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Two-way delay or latency."; + } + } + + grouping performance-metrics-thresholds { + description + "Grouping for configurable thresholds for measured + attributes."; + uses one-way-performance-metrics; + uses two-way-performance-metrics; + } + + grouping performance-metrics-attributes { + description + "Contains Performance Metrics (PM) attributes."; + container performance-metrics-one-way { + description + "One-way link performance information in real time."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + uses performance-metrics-one-way-delay-loss; + uses performance-metrics-one-way-bandwidth; + } + container performance-metrics-two-way { + description + "Two-way link performance information in real time."; + reference + "RFC 6374: Packet Loss and Delay Measurement for MPLS + Networks"; + uses performance-metrics-two-way-delay-loss; + } + } + + grouping performance-metrics-throttle-container { + description + "Controls Performance Metrics (PM) throttling."; + container throttle { + must 'suppression-interval >= measure-interval' { + error-message "'suppression-interval' cannot be less than " + + "'measure-interval'."; + description + "Constraint on 'suppression-interval' and + 'measure-interval'."; + } + description + "Link performance information in real time."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric 
Extensions + RFC 8570: IS-IS Traffic Engineering (TE) Metric Extensions + RFC 7823: Performance-Based Path Selection for Explicitly + Routed Label Switched Paths (LSPs) Using TE Metric + Extensions"; + leaf one-way-delay-offset { + type uint32 { + range "0..16777215"; + } + units "microseconds"; + default "0"; + description + "Offset value to be added to the measured delay value."; + } + leaf measure-interval { + type uint32; + units "seconds"; + default "30"; + description + "Interval to measure the extended metric values."; + } + leaf advertisement-interval { + type uint32; + units "seconds"; + default "0"; + description + "Interval to advertise the extended metric values."; + } + leaf suppression-interval { + type uint32 { + range "1..max"; + } + units "seconds"; + default "120"; + description + "Interval to suppress advertisement of the extended metric + values."; + reference + "RFC 8570: IS-IS Traffic Engineering (TE) Metric + Extensions, Section 6"; + } + container threshold-out { + description + "If the measured parameter falls outside an upper bound + for all but the minimum-delay metric (or a lower bound + for the minimum-delay metric only) and the advertised + value is not already outside that bound, an 'anomalous' + announcement (anomalous bit set) will be triggered."; + uses performance-metrics-thresholds; + } + container threshold-in { + description + "If the measured parameter falls inside an upper bound + for all but the minimum-delay metric (or a lower bound + for the minimum-delay metric only) and the advertised + value is not already inside that bound, a 'normal' + announcement (anomalous bit cleared) will be triggered."; + uses performance-metrics-thresholds; + } + container threshold-accelerated-advertisement { + description + "When the difference between the last advertised value and + the current measured value exceeds this threshold, an + 'anomalous' announcement (anomalous bit set) will be + triggered."; + uses performance-metrics-thresholds; + } + } + } + + /* + * TE tunnel generic groupings + */ + + grouping explicit-route-hop { + description + "The explicit route entry grouping."; + choice type { + description + "The explicit route entry type."; + case numbered-node-hop { + container numbered-node-hop { + must 'node-id-uri or node-id' { + description + "At least one node identifier needs to be present."; + } + description + "Numbered node route hop."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3, EXPLICIT_ROUTE in RSVP-TE + RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + } + } + case numbered-link-hop { + container numbered-link-hop { + description + "Numbered link explicit route hop."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3, EXPLICIT_ROUTE in RSVP-TE + RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf link-tp-id { + type te-tp-id; + mandatory true; + description + "TE Link Termination Point (LTP) identifier."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + leaf direction { + type 
te-link-direction; + default "outgoing"; + description + "Link route object direction."; + } + } + } + case unnumbered-link-hop { + container unnumbered-link-hop { + must '(link-tp-id-uri or link-tp-id) and ' + + '(node-id-uri or node-id)' { + description + "At least one node identifier and at least one Link + Termination Point (LTP) identifier need to be + present."; + } + description + "Unnumbered link explicit route hop."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 4.3, EXPLICIT_ROUTE in RSVP-TE + RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf link-tp-id-uri { + type nt:tp-id; + description + "Link Termination Point (LTP) identifier."; + } + leaf link-tp-id { + type te-tp-id; + description + "TE LTP identifier. + + The combination of the TE link ID and the TE node ID + is used to identify an unnumbered TE link."; + } + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + leaf direction { + type te-link-direction; + default "outgoing"; + description + "Link route object direction."; + } + } + } + case as-number { + container as-number-hop { + description + "AS explicit route hop."; + leaf as-number { + type inet:as-number; + mandatory true; + description + "The Autonomous System (AS) number."; + } + leaf hop-type { + type te-hop-type; + default "strict"; + description + "Strict or loose hop."; + } + } + } + case label { + description + "The label explicit route hop type."; + container label-hop { + description + "Label hop type."; + uses te-label; + } + } + } + } + + grouping explicit-route-hop-with-srlg { + description + "Augments the explicit route entry grouping with Shared Risk + Link Group (SRLG) hop type."; + uses explicit-route-hop { + augment "type" { + description + "Augmentation for a generic explicit route for Shared + Risk Link Group (SRLG) inclusion or exclusion."; + case srlg { + description + "An Shared Risk Link Group (SRLG) value to be + included or excluded."; + container srlg { + description + "Shared Risk Link Group (SRLG) container."; + leaf srlg { + type uint32; + description + "Shared Risk Link Group (SRLG) value."; + } + } + } + } + } + } + + grouping record-route-state { + description + "The Record Route grouping."; + leaf index { + type uint32; + description + "Record Route hop index. + + The index is used to identify an entry in the list. 
+ + The order of entries is defined by the user without relying + on key values."; + } + choice type { + description + "The Record Route entry type."; + case numbered-node-hop { + description + "Numbered node route hop."; + container numbered-node-hop { + must 'node-id-uri or node-id' { + description + "At least one node identifier need to be present."; + } + description + "Numbered node route hop container."; + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + case numbered-link-hop { + description + "Numbered link route hop."; + container numbered-link-hop { + description + "Numbered link route hop container."; + leaf link-tp-id { + type te-tp-id; + mandatory true; + description + "Numbered TE LTP identifier."; + } + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + case unnumbered-link-hop { + description + "Unnumbered link route hop."; + container unnumbered-link-hop { + must '(link-tp-id-uri or link-tp-id) and ' + + '(node-id-uri or node-id)' { + description + "At least one node identifier and at least one Link + Termination Point (LTP) identifier need to be + present."; + } + description + "Unnumbered link Record Route hop."; + reference + "RFC 3477: Signalling Unnumbered Links in Resource + ReSerVation Protocol - Traffic Engineering + (RSVP-TE)"; + leaf link-tp-id-uri { + type nt:tp-id; + description + "Link Termination Point (LTP) identifier."; + } + leaf link-tp-id { + type te-tp-id; + description + "TE LTP identifier. 
+ + The combination of the TE link ID and the TE node ID + is used to identify an unnumbered TE link."; + } + leaf node-id-uri { + type nw:node-id; + description + "The identifier of a node in the topology."; + } + leaf node-id { + type te-node-id; + description + "The identifier of a node in the TE topology."; + } + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + case label { + description + "The label Record Route entry types."; + container label-hop { + description + "Label route hop type."; + uses te-label; + leaf-list flags { + type path-attribute-flags; + description + "Path attributes flags."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels + RFC 4090: Fast Reroute Extensions to RSVP-TE for LSP + Tunnels + RFC 4561: Definition of a Record Route Object (RRO) + Node-Id Sub-Object"; + } + } + } + } + } + + grouping label-restriction-info { + description + "Label set item information."; + leaf restriction { + type enumeration { + enum inclusive { + description + "The label or label range is inclusive."; + } + enum exclusive { + description + "The label or label range is exclusive."; + } + } + default "inclusive"; + description + "Indicates whether the list item is inclusive or exclusive."; + } + leaf index { + type uint32; + description + "The index of the label restriction list entry."; + } + container label-start { + must "(not(../label-end/te-label/direction) and" + + " not(te-label/direction))" + + " or " + + "(../label-end/te-label/direction = te-label/direction)" + + " or " + + "(not(te-label/direction) and" + + " (../label-end/te-label/direction = 'forward'))" + + " or " + + "(not(../label-end/te-label/direction) and" + + " (te-label/direction = 'forward'))" { + error-message "'label-start' and 'label-end' must have the " + + "same direction."; + } + description + "This is the starting label if a label range is specified. + This is the label value if a single label is specified, + in which case the 'label-end' attribute is not set."; + uses te-label; + } + container label-end { + must "(not(../label-start/te-label/direction) and" + + " not(te-label/direction))" + + " or " + + "(../label-start/te-label/direction = te-label/direction)" + + " or " + + "(not(te-label/direction) and" + + " (../label-start/te-label/direction = 'forward'))" + + " or " + + "(not(../label-start/te-label/direction) and" + + " (te-label/direction = 'forward'))" { + error-message "'label-start' and 'label-end' must have the " + + "same direction."; + } + description + "This is the ending label if a label range is specified. + This attribute is not set if a single label is specified."; + uses te-label; + } + container label-step { + description + "The step increment between labels in the label range. + + The label start/end values MUST be consistent with the sign + of label step. 
+ + For example: + 'label-start' < 'label-end' enforces 'label-step' > 0 + 'label-start' > 'label-end' enforces 'label-step' < 0."; + choice technology { + default "generic"; + description + "Data-plane technology type."; + case generic { + leaf generic { + type int32; + default "1"; + description + "Label range step."; + } + } + } + } + leaf range-bitmap { + type yang:hex-string; + description + "When there are gaps between 'label-start' and 'label-end', + this attribute is used to specify the positions + of the used labels. + + This is represented in big endian as 'hex-string'. + + In case the restriction is 'inclusive', the bit-position is + set if the corresponding mapped label is available. + In this case, if the range-bitmap is not present, all the + labels in the range are available. + + In case the restriction is 'exclusive', the bit-position is + set if the corresponding mapped label is not available. + In this case, if the range-bitmap is not present, all the + labels in the range are not available. + + The most significant byte in the hex-string is the farthest + to the left in the byte sequence. + + Leading zero bytes in the configured value may be omitted + for brevity. + + Each bit position in the 'range-bitmap' 'hex-string' maps + to a label in the range derived from 'label-start'. + + For example, assuming that 'label-start' = 16000 and + 'range-bitmap' = 0x01000001, then: + - bit position (0) is set, and the corresponding mapped + label from the range is 16000 + (0 * 'label-step') or + 16000 for default 'label-step' = 1. + - bit position (24) is set, and the corresponding mapped + label from the range is 16000 + (24 * 'label-step') or + 16024 for default 'label-step' = 1."; + } + } + + grouping label-set-info { + description + "Grouping for the list of label restrictions specifying what + labels may or may not be used."; + container label-restrictions { + description + "The label restrictions container."; + list label-restriction { + key "index"; + description + "The absence of the label restrictions container implies + that all labels are acceptable; otherwise, only restricted + labels are available."; + reference + "RFC 7579: General Network Element Constraint Encoding + for GMPLS-Controlled Networks"; + uses label-restriction-info; + } + } + } + + grouping optimization-metric-entry { + description + "Optimization metrics configuration grouping."; + leaf metric-type { + type identityref { + base path-metric-optimization-type; + } + description + "Identifies the 'metric-type' that the path computation + process uses for optimization."; + } + leaf weight { + type uint8; + default "1"; + description + "TE path metric normalization weight."; + } + container explicit-route-exclude-objects { + when "../metric-type = " + + "'te-types:path-metric-optimize-excludes'"; + description + "Container for the 'exclude route' object list."; + uses path-route-exclude-objects; + } + container explicit-route-include-objects { + when "../metric-type = " + + "'te-types:path-metric-optimize-includes'"; + description + "Container for the 'include route' object list."; + uses path-route-include-objects; + } + } + + grouping common-constraints { + description + "Common constraints grouping that can be set on + a constraint set or directly on the tunnel."; + uses te-bandwidth { + description + "A requested bandwidth to use for path computation."; + } + leaf link-protection { + type identityref { + base link-protection-type; + } + default "te-types:link-protection-unprotected"; + description + 
"Link protection type required for the links included + in the computed path."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching + (GMPLS)"; + } + leaf setup-priority { + type uint8 { + range "0..7"; + } + default "7"; + description + "TE LSP requested setup priority."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + leaf hold-priority { + type uint8 { + range "0..7"; + } + default "7"; + description + "TE LSP requested hold priority."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + leaf signaling-type { + type identityref { + base path-signaling-type; + } + default "te-types:path-setup-rsvp"; + description + "TE tunnel path signaling type."; + } + } + + grouping tunnel-constraints { + description + "Tunnel constraints grouping that can be set on + a constraint set or directly on the tunnel."; + leaf network-id { + type nw:network-id; + description + "The network topology identifier."; + } + uses te-topology-identifier; + uses common-constraints; + } + + grouping path-constraints-route-objects { + description + "List of route entries to be included or excluded when + performing the path computation."; + container explicit-route-objects { + description + "Container for the explicit route object lists."; + list route-object-exclude-always { + key "index"; + ordered-by user; + description + "List of route objects to always exclude from the path + computation."; + leaf index { + type uint32; + description + "Explicit Route Object index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop; + } + list route-object-include-exclude { + key "index"; + ordered-by user; + description + "List of route objects to include or exclude in the path + computation."; + leaf explicit-route-usage { + type identityref { + base route-usage-type; + } + default "te-types:route-include-object"; + description + "Indicates whether to include or exclude the + route object. + + The default is to include it."; + } + leaf index { + type uint32; + description + "Route object include-exclude index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop-with-srlg; + } + } + } + + grouping path-route-include-objects { + description + "List of route objects to be included when performing + the path computation."; + list route-object-include-object { + key "index"; + ordered-by user; + description + "List of Explicit Route Objects to be included in the + path computation."; + leaf index { + type uint32; + description + "Route object entry index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop; + } + } + + grouping path-route-exclude-objects { + description + "List of route objects to be excluded when performing + the path computation."; + list route-object-exclude-object { + key "index"; + ordered-by user; + description + "List of Explicit Route Objects to be excluded in the + path computation."; + leaf index { + type uint32; + description + "Route object entry index. + + The index is used to identify an entry in the list. 
+ + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop-with-srlg; + } + } + + grouping generic-path-metric-bounds { + description + "TE path metric bounds grouping."; + container path-metric-bounds { + description + "Top-level container for the list of path metric bounds."; + list path-metric-bound { + key "metric-type"; + description + "List of path metric bounds, which can apply to link and + path metrics. + + TE paths which have at least one path metric which + exceeds the specified bounds MUST NOT be selected. + + TE paths that traverse TE links which have at least one + link metric which exceeds the specified bounds MUST NOT + be selected."; + leaf metric-type { + type identityref { + base link-path-metric-type; + } + description + "Identifies an entry in the list of 'metric-type' items + bound for the TE path."; + } + leaf upper-bound { + type uint64; + default "0"; + description + "Upper bound on the specified 'metric-type'. + + A zero indicates an unbounded upper limit for the + specified 'metric-type'. + + The unit of is interpreted in the context of the + 'metric-type' identity."; + } + } + } + } + + grouping generic-path-optimization { + description + "TE generic path optimization grouping."; + container optimizations { + description + "The objective function container that includes + attributes to impose when computing a TE path."; + choice algorithm { + description + "Optimizations algorithm."; + case metric { + if-feature "path-optimization-metric"; + /* Optimize by metric */ + list optimization-metric { + key "metric-type"; + description + "TE path metric type."; + uses optimization-metric-entry; + } + /* Tiebreakers */ + container tiebreakers { + status deprecated; + description + "Container for the list of tiebreakers. + + This container has been deprecated by the tiebreaker + leaf."; + list tiebreaker { + key "tiebreaker-type"; + status deprecated; + description + "The list of tiebreaker criteria to apply on an + equally favored set of paths, in order to pick + the best."; + leaf tiebreaker-type { + type identityref { + base path-metric-type; + } + status deprecated; + description + "Identifies an entry in the list of tiebreakers."; + } + } + } + } + case objective-function { + if-feature "path-optimization-objective-function"; + /* Objective functions */ + container objective-function { + description + "The objective function container that includes + attributes to impose when computing a TE path."; + leaf objective-function-type { + type identityref { + base objective-function-type; + } + default "te-types:of-minimize-cost-path"; + description + "Objective function entry."; + } + } + } + } + } + leaf tiebreaker { + type identityref { + base path-tiebreaker-type; + } + default "te-types:path-tiebreaker-random"; + description + "The tiebreaker criteria to apply on an equally favored set + of paths, in order to pick the best."; + } + } + + grouping generic-path-affinities { + description + "Path affinities grouping."; + container path-affinities-values { + description + "Path affinities represented as values."; + list path-affinities-value { + key "usage"; + description + "List of named affinity constraints."; + leaf usage { + type identityref { + base resource-affinities-type; + } + description + "Identifies an entry in the list of value affinity + constraints."; + } + leaf value { + type admin-groups; + default ""; + description + "The affinity value. 
+ + The default is empty."; + } + } + } + container path-affinity-names { + description + "Path affinities represented as names."; + list path-affinity-name { + key "usage"; + description + "List of named affinity constraints."; + leaf usage { + type identityref { + base resource-affinities-type; + } + description + "Identifies an entry in the list of named affinity + constraints."; + } + list affinity-name { + key "name"; + description + "List of named affinities."; + leaf name { + type string; + description + "Identifies a named affinity entry."; + } + } + } + } + } + + grouping generic-path-srlgs { + description + "Path Shared Risk Link Group (SRLG) grouping."; + container path-srlgs-lists { + description + "Path Shared Risk Link Group (SRLG) properties container."; + list path-srlgs-list { + key "usage"; + description + "List of Shared Risk Link Group (SRLG) values to be + included or excluded."; + leaf usage { + type identityref { + base route-usage-type; + } + description + "Identifies an entry in a list of Shared Risk Link Groups + (SRLGs) to either include or exclude."; + } + leaf-list values { + type srlg; + description + "List of Shared Risk Link Group (SRLG) values."; + } + } + } + container path-srlgs-names { + description + "Container for the list of named Shared Risk Link Groups + (SRLGs)."; + list path-srlgs-name { + key "usage"; + description + "List of named Shared Risk Link Groups (SRLGs) to be + included or excluded."; + leaf usage { + type identityref { + base route-usage-type; + } + description + "Identifies an entry in a list of named Shared Risk Link + Groups (SRLGs) to either include or exclude."; + } + leaf-list names { + type string; + description + "List of named Shared Risk Link Groups (SRLGs)."; + } + } + } + } + + grouping generic-path-disjointness { + description + "Path disjointness grouping."; + leaf disjointness { + type te-path-disjointness; + description + "The type of resource disjointness. + When configured for a primary path, the disjointness level + applies to all secondary LSPs. 
+ + When configured for a secondary path, the disjointness + level overrides the level configured for the primary path."; + } + } + + grouping common-path-constraints-attributes { + description + "Common path constraints configuration grouping."; + uses common-constraints; + uses generic-path-metric-bounds; + uses generic-path-affinities; + uses generic-path-srlgs; + } + + grouping generic-path-constraints { + description + "Global named path constraints configuration grouping."; + container path-constraints { + description + "TE named path constraints container."; + uses common-path-constraints-attributes; + uses generic-path-disjointness; + } + } + + grouping generic-path-properties { + description + "TE generic path properties grouping."; + container path-properties { + config false; + description + "The TE path properties."; + list path-metric { + key "metric-type"; + description + "TE path metric type."; + leaf metric-type { + type identityref { + base path-metric-type; + } + description + "TE path metric type."; + } + leaf accumulative-value { + type uint64; + description + "TE path metric accumulative value."; + } + } + uses generic-path-affinities; + uses generic-path-srlgs; + container path-route-objects { + description + "Container for the list of route objects either returned by + the computation engine or actually used by an LSP."; + list path-route-object { + key "index"; + ordered-by user; + description + "List of route objects either returned by the computation + engine or actually used by an LSP."; + leaf index { + type uint32; + description + "Route object entry index. + + The index is used to identify an entry in the list. + + The order of entries is defined by the user without + relying on key values."; + } + uses explicit-route-hop; + } + } + } + } + + grouping encoding-and-switching-type { + description + "Common grouping to define the LSP encoding and + switching types"; + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "LSP encoding type."; + reference + "RFC 3945: Generalized Multi-Protocol Label Switching (GMPLS) + Architecture"; + } + leaf switching-type { + type identityref { + base te-types:switching-capabilities; + } + description + "LSP switching type."; + reference + "RFC 3945: Generalized Multi-Protocol Label Switching (GMPLS) + Architecture"; + } + } + + grouping te-generic-node-id { + description + "A reusable grouping for a TE generic node identifier."; + leaf id { + type union { + type te-node-id; + type inet:ip-address; + type nw:node-id; + } + description + "The identifier of the node. + + It can be represented as IP address or dotted quad address + or as an URI. 
+ + The type data node disambiguates the union type."; + } + leaf type { + type enumeration { + enum ip { + description + "IP address representation of the node identifier."; + } + enum te-id { + description + "TE identifier of the node"; + } + enum node-id { + description + "URI representation of the node identifier."; + } + } + description + "Type of node identifier representation."; + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang new file mode 100644 index 000000000..f788fa2ea --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te-device.yang @@ -0,0 +1,595 @@ +module ietf-te-device { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-device"; + + /* Replace with IANA when assigned */ + + prefix te-dev; + + /* Import TE module */ + + import ietf-te { + prefix te; + reference + "RFCXXXX: A YANG Data Model for Traffic Engineering + Tunnels and Interfaces"; + } + + /* Import TE types */ + + import ietf-te-types { + prefix te-types; + reference + "draft-ietf-teas-rfc8776-update: Common YANG Data Types + for Traffic Engineering."; + } + import ietf-interfaces { + prefix if; + reference + "RFC8343: A YANG Data Model for Interface Management"; + } + import ietf-routing-types { + prefix rt-types; + reference + "RFC8294: Common YANG Data Types for the Routing Area"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Himanshu Shah + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + + + Editor: Oscar Gonzalez de Dios + "; + + description + "This module defines a data model for TE device configurations, + state, and RPCs. The model fully conforms to the + Network Management Datastore Architecture (NMDA). + + Copyright (c) 2023 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + // RFC Ed.: replace XXXX with actual RFC number and remove this + // note. + // RFC Ed.: update the date below with the date of RFC publication + // and remove this note. 
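+
+  /*
+   * Illustrative usage sketch (non-normative; added for clarity
+   * and not part of the published draft text): the groupings and
+   * augment statements below attach device-specific TE data under
+   * the base 'ietf-te' tree.  For example, a TE-enabled interface
+   * and its IGP flooding thresholds would be configured at a path
+   * such as
+   *
+   *   /te:te/te-dev:interfaces/te-dev:interface[name='eth0']
+   *
+   * where 'eth0' is a hypothetical interface name, and leaves such
+   * as 'te-metric' and the 'te-igp-flooding-bandwidth-config'
+   * thresholds are defined by the statements later in this module.
+   */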
+ + revision 2024-02-02 { + description + "Initial revision for the TE device YANG module."; + reference + "RFCXXXX: A YANG Data Model for Traffic Engineering Tunnels + and Interfaces"; + } + + grouping lsp-device-timers { + description + "Device TE LSP timers configs."; + leaf lsp-install-interval { + type uint32; + units "seconds"; + description + "TE LSP installation delay time."; + } + leaf lsp-cleanup-interval { + type uint32; + units "seconds"; + description + "TE LSP cleanup delay time."; + } + leaf lsp-invalidation-interval { + type uint32; + units "seconds"; + description + "TE LSP path invalidation before taking action delay time."; + } + } + + grouping te-igp-flooding-bandwidth-config { + description + "Configurable items for igp flooding bandwidth + threshold configuration."; + leaf threshold-type { + type enumeration { + enum delta { + description + "'delta' indicates that the local + system should flood IGP updates when a + change in reserved bandwidth >= the specified + delta occurs on the interface."; + } + enum threshold-crossed { + description + "THRESHOLD-CROSSED indicates that + the local system should trigger an update (and + hence flood) the reserved bandwidth when the + reserved bandwidth changes such that it crosses, + or becomes equal to one of the threshold values."; + } + } + description + "The type of threshold that should be used to specify the + values at which bandwidth is flooded. 'delta' indicates that + the local system should flood IGP updates when a change in + reserved bandwidth >= the specified delta occurs on the + interface. Where 'threshold-crossed' is specified, the local + system should trigger an update (and hence flood) the + reserved bandwidth when the reserved bandwidth changes such + that it crosses, or becomes equal to one of the threshold + values."; + } + leaf delta-percentage { + when "../threshold-type = 'delta'" { + description + "The percentage delta can only be specified when the + threshold type is specified to be a percentage delta of + the reserved bandwidth."; + } + type rt-types:percentage; + description + "The percentage of the maximum-reservable-bandwidth + considered as the delta that results in an IGP update + being flooded."; + } + leaf threshold-specification { + when "../threshold-type = 'threshold-crossed'" { + description + "The selection of whether mirrored or separate threshold + values are to be used requires user specified thresholds + to be set."; + } + type enumeration { + enum mirrored-up-down { + description + "mirrored-up-down indicates that a single set of + threshold values should be used for both increasing + and decreasing bandwidth when determining whether + to trigger updated bandwidth values to be flooded + in the IGP TE extensions."; + } + enum separate-up-down { + description + "separate-up-down indicates that a separate + threshold values should be used for the increasing + and decreasing bandwidth when determining whether + to trigger updated bandwidth values to be flooded + in the IGP TE extensions."; + } + } + description + "This value specifies whether a single set of threshold + values should be used for both increasing and decreasing + bandwidth when determining whether to trigger updated + bandwidth values to be flooded in the IGP TE extensions. 
+ 'mirrored-up-down' indicates that a single value (or set of + values) should be used for both increasing and decreasing + values, where 'separate-up-down' specifies that the + increasing and decreasing values will be separately + specified."; + } + leaf-list up-thresholds { + when "../threshold-type = 'threshold-crossed'" + + "and ../threshold-specification = 'separate-up-down'" { + description + "A list of up-thresholds can only be specified when the + bandwidth update is triggered based on crossing a + threshold and separate up and down thresholds are + required."; + } + type rt-types:percentage; + description + "The thresholds (expressed as a percentage of the maximum + reservable bandwidth) at which bandwidth updates are to be + triggered when the bandwidth is increasing."; + } + leaf-list down-thresholds { + when "../threshold-type = 'threshold-crossed'" + + "and ../threshold-specification = 'separate-up-down'" { + description + "A list of down-thresholds can only be specified when the + bandwidth update is triggered based on crossing a + threshold and separate up and down thresholds are + required."; + } + type rt-types:percentage; + description + "The thresholds (expressed as a percentage of the maximum + reservable bandwidth) at which bandwidth updates are to be + triggered when the bandwidth is decreasing."; + } + leaf-list up-down-thresholds { + when "../threshold-type = 'threshold-crossed'" + + "and ../threshold-specification = 'mirrored-up-down'" { + description + "A list of thresholds corresponding to both increasing + and decreasing bandwidths can be specified only when an + update is triggered based on crossing a threshold, and + the same up and down thresholds are required."; + } + type rt-types:percentage; + description + "The thresholds (expressed as a percentage of the maximum + reservable bandwidth of the interface) at which bandwidth + updates are flooded - used both when the bandwidth is + increasing and decreasing."; + } + } + + /** + * TE device augmentations + */ + augment "/te:te" { + description + "TE global container."; + /* TE Interface Configuration Data */ + container interfaces { + description + "Configuration data model for TE interfaces."; + uses te-igp-flooding-bandwidth-config; + list interface { + key "name"; + description + "The list of interfaces enabled for TE."; + leaf name { + type if:interface-ref; + description + "The reference to interface enabled for TE."; + } + /* TE interface parameters */ + leaf te-metric { + type te-types:te-metric; + description + "TE interface metric."; + } + choice admin-group-type { + description + "TE interface administrative groups + representation type."; + case value-admin-groups { + choice value-admin-group-type { + description + "choice of admin-groups."; + case admin-groups { + description + "Administrative group/Resource + class/Color."; + leaf admin-group { + type te-types:admin-group; + description + "TE interface administrative group."; + } + } + case extended-admin-groups { + if-feature "te-types:extended-admin-groups"; + description + "Extended administrative group/Resource + class/Color."; + leaf extended-admin-group { + type te-types:extended-admin-group; + description + "TE interface extended administrative group."; + } + } + } + } + case named-admin-groups { + list named-admin-groups { + if-feature "te-types:extended-admin-groups"; + if-feature "te-types:named-extended-admin-groups"; + key "named-admin-group"; + description + "A list of named admin-group entries."; + leaf named-admin-group { + type 
leafref { + path "../../../../te:globals/" + + "te:named-admin-groups/te:named-admin-group/" + + "te:name"; + } + description + "A named admin-group entry."; + } + } + } + } + choice srlg-type { + description + "Choice of SRLG configuration."; + case value-srlgs { + list values { + key "value"; + description + "List of SRLG values that + this link is part of."; + leaf value { + type uint32 { + range "0..4294967295"; + } + description + "Value of the SRLG"; + } + } + } + case named-srlgs { + list named-srlgs { + if-feature "te-types:named-srlg-groups"; + key "named-srlg"; + description + "A list of named SRLG entries."; + leaf named-srlg { + type leafref { + path "../../../../te:globals/" + + "te:named-srlgs/te:named-srlg/te:name"; + } + description + "A named SRLG entry."; + } + } + } + } + uses te-igp-flooding-bandwidth-config; + list switching-capabilities { + key "switching-capability"; + description + "List of interface capabilities for this interface."; + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching Capability for this interface."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by this interface."; + } + } + container te-advertisements-state { + config false; + description + "TE interface advertisements state container."; + leaf flood-interval { + type uint32; + description + "The periodic flooding interval."; + } + leaf last-flooded-time { + type uint32; + units "seconds"; + description + "Time elapsed since last flooding in seconds."; + } + leaf next-flooded-time { + type uint32; + units "seconds"; + description + "Time remained for next flooding in seconds."; + } + leaf last-flooded-trigger { + type enumeration { + enum link-up { + description + "Link-up flooding trigger."; + } + enum link-down { + description + "Link-down flooding trigger."; + } + enum threshold-up { + description + "Bandwidth reservation up threshold."; + } + enum threshold-down { + description + "Bandwidth reservation down threshold."; + } + enum bandwidth-change { + description + "Bandwidth capacity change."; + } + enum user-initiated { + description + "Initiated by user."; + } + enum srlg-change { + description + "SRLG property change."; + } + enum periodic-timer { + description + "Periodic timer expired."; + } + } + default "periodic-timer"; + description + "Trigger for the last flood."; + } + list advertised-level-areas { + key "level-area"; + description + "List of level-areas that the TE interface is + advertised in."; + leaf level-area { + type uint32; + description + "The IGP area or level where the TE interface link + state is advertised in."; + } + } + } + } + } + } + + /* TE globals device augmentation */ + + augment "/te:te/te:globals" { + description + "Global TE device specific configuration parameters."; + uses lsp-device-timers; + } + + /* TE tunnels device configuration augmentation */ + + augment "/te:te/te:tunnels/te:tunnel" { + description + "Tunnel device dependent augmentation."; + leaf path-invalidation-action { + type identityref { + base te-types:path-invalidation-action-type; + } + description + "Tunnel path invalidation action."; + } + uses lsp-device-timers; + } + + /* TE LSPs device state augmentation */ + + augment "/te:te/te:lsps/te:lsp" { + description + "TE LSP device dependent augmentation."; + container lsp-timers { + when "../te:origin-type = 'ingress'" { + description + "Applicable to ingress LSPs only."; + } + description + 
"Ingress LSP timers."; + leaf uptime { + type uint32; + units "seconds"; + description + "The LSP uptime."; + } + leaf time-to-install { + type uint32; + units "seconds"; + description + "The time remaining for a new LSP to be instantiated + in forwarding to carry traffic."; + } + leaf time-to-destroy { + type uint32; + units "seconds"; + description + "The time remaining for a existing LSP to be deleted + from forwarding."; + } + } + container downstream-info { + when "../te:origin-type != 'egress'" { + description + "Downstream information of the LSP."; + } + description + "downstream information."; + leaf nhop { + type te-types:te-tp-id; + description + "downstream next-hop address."; + } + leaf outgoing-interface { + type if:interface-ref; + description + "downstream interface."; + } + container neighbor { + uses te-types:te-generic-node-id; + description + "downstream neighbor address."; + } + leaf label { + type rt-types:generalized-label; + description + "downstream label."; + } + } + container upstream-info { + when "../te:origin-type != 'ingress'" { + description + "Upstream information of the LSP."; + } + description + "upstream information."; + leaf phop { + type te-types:te-tp-id; + description + "upstream next-hop or previous-hop address."; + } + container neighbor { + uses te-types:te-generic-node-id; + description + "upstream neighbor address."; + } + leaf label { + type rt-types:generalized-label; + description + "upstream label."; + } + } + } + + /* TE interfaces RPCs/execution Data */ + + rpc link-state-update { + description + "Triggers a link state update for the specific interface."; + input { + choice filter-type { + mandatory true; + description + "Filter choice."; + case match-all { + leaf all { + type empty; + mandatory true; + description + "Match all TE interfaces."; + } + } + case match-one-interface { + leaf interface { + type if:interface-ref; + description + "Match a specific TE interface."; + } + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang new file mode 100644 index 000000000..48b160305 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-ietf-teas-yang-te-34/ietf-te.yang @@ -0,0 +1,1516 @@ +module ietf-te { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te"; + + /* Replace with IANA when assigned */ + + prefix te; + + /* Import TE generic types */ + import ietf-te-types { + prefix te-types; + reference + "draft-ietf-teas-rfc8776-update: Common YANG Data Types + for Traffic Engineering."; + } + import ietf-yang-types { + prefix yang; + reference + "RFC6991: Common YANG Data Types."; + } + + import ietf-network { + prefix "nw"; + reference "RFC 8345: A YANG Data Model for Network Topologies"; + } + + import ietf-network-topology { + prefix "nt"; + reference "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group."; + contact + "WG Web: + WG List: + + Editor: Tarek Saad + + + Editor: Rakesh Gandhi + + + Editor: Vishnu Pavan Beeram + + + Editor: Himanshu Shah + + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + + + Editor: Oscar Gonzalez de Dios + "; + + description + "YANG data module for TE configuration, state, and RPCs. + The model fully conforms to the Network Management + Datastore Architecture (NMDA). 
+ + Copyright (c) 2023 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Revised BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX + (https://www.rfc-editor.org/info/rfcXXXX); see the RFC itself + for full legal notices."; + + // RFC Ed.: replace XXXX with actual RFC number and remove this + // note. + // RFC Ed.: update the date below with the date of RFC publication + // and remove this note. + + revision 2024-02-02 { + description + "Initial revision for the TE generic YANG module."; + reference + "RFCXXXX: A YANG Data Model for Traffic Engineering Tunnels + and Interfaces."; + } + + typedef tunnel-ref { + type leafref { + path "/te:te/te:tunnels/te:tunnel/te:name"; + require-instance false; + } + description + "This type is used by data models that need to reference + configured TE tunnel."; + } + + /** + * TE tunnel generic groupings + */ + + grouping path-common-properties { + description + "Common path attributes."; + leaf name { + type string; + description + "TE path name."; + } + leaf path-computation-method { + type identityref { + base te-types:path-computation-method; + } + default "te-types:path-locally-computed"; + description + "The method used for computing the path, either + locally computed, queried from a server or not + computed at all (explicitly configured)."; + } + container path-computation-server { + when "derived-from-or-self(../path-computation-method, " + + "'te-types:path-externally-queried')" { + description + "The path-computation server when the path is + externally queried."; + } + uses te-types:te-generic-node-id; + description + "Address of the external path computation + server."; + } + leaf compute-only { + type empty; + description + "When present, the path is computed and updated whenever + the topology is updated. 
No resources are committed + or reserved in the network."; + } + leaf use-path-computation { + when "derived-from-or-self(../path-computation-method, " + + "'te-types:path-locally-computed')"; + type boolean; + default "true"; + description + "When 'true' indicates the path is dynamically computed + and/or validated against the Traffic-Engineering Database + (TED), and when 'false' indicates no path expansion or + validation against the TED is required."; + } + leaf lockdown { + type empty; + description + "When present, indicates no reoptimization to be attempted + for this path."; + } + leaf path-scope { + type identityref { + base te-types:path-scope-type; + } + default "te-types:path-scope-end-to-end"; + config false; + description + "Indicates whether the path is a segment or portion of + of the full path., or is the an end-to-end path for + the TE Tunnel."; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping path-compute-info { + description + "Attributes used for path computation request."; + uses tunnel-associations-properties; + uses te-types:generic-path-optimization; + leaf named-path-constraint { + if-feature "te-types:named-path-constraints"; + type leafref { + path "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:name"; + } + description + "Reference to a globally defined named path constraint set."; + } + uses path-constraints-common; + } + + /* This grouping is re-used in path-computation rpc */ + grouping path-forward-properties { + description + "The path preference."; + leaf preference { + type uint8 { + range "1..255"; + } + default "1"; + description + "Specifies a preference for this path. The lower the number + higher the preference."; + } + leaf co-routed { + when "/te:te/te:tunnels/te:tunnel/te:bidirectional = 'true'" { + description + "Applicable to bidirectional tunnels only."; + } + type boolean; + default "false"; + description + "Indicates whether the reverse path must to be co-routed + with the primary."; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping k-requested-paths { + description + "The k-shortest paths requests."; + leaf k-requested-paths { + type uint8; + default "1"; + description + "The number of k-shortest-paths requested from the path + computation server and returned sorted by its optimization + objective."; + } + } + + grouping path-state { + description + "TE per path state parameters."; + uses path-computation-response; + container lsp-provisioning-error-infos { + config false; + description + "LSP provisioning error information."; + list lsp-provisioning-error-info { + description + "List of LSP provisioning error info entries."; + leaf error-reason { + type identityref { + base te-types:lsp-provisioning-error-reason; + } + description + "LSP provision error type."; + } + leaf error-description { + type string; + description + "The textual representation of the error occurred during + path computation."; + } + leaf error-timestamp { + type yang:date-and-time; + description + "Timestamp of when the reported error occurred."; + } + leaf error-node-id { + type te-types:te-node-id; + description + "Node identifier of node where error occurred."; + } + leaf error-link-id { + type te-types:te-tp-id; + description + "Link ID where the error occurred."; + } + leaf lsp-id { + type uint16; + description + "The LSP-ID for which path computation was performed."; + } + } + } + container lsps { + config false; + description + "The TE LSPs container."; + list lsp { + key 
"node lsp-id"; + description + "List of LSPs associated with the tunnel."; + leaf tunnel-name { + type leafref { + path "/te:te/te:lsps/te:lsp/te:tunnel-name"; + } + description "TE tunnel name."; + } + leaf node { + type leafref { + path "/te:te/te:lsps/te:lsp[tunnel-name=" + + "current()/../te:tunnel-name][lsp-id=" + + "current()/../te:lsp-id]/te:node"; + } + description "The node where the LSP state resides on."; + } + leaf lsp-id { + type leafref { + path "/te:te/te:lsps/te:lsp[tunnel-name=" + + "current()/../tunnel-name]/te:lsp-id"; + } + description "The TE LSP identifier."; + } + } + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping path-computation-response { + description + "Attributes reported by path computation response."; + container computed-paths-properties { + config false; + description + "Computed path properties container."; + list computed-path-properties { + key "k-index"; + description + "List of computed paths."; + leaf k-index { + type uint8; + description + "The k-th path returned from the computation server. + A lower k value path is more optimal than higher k + value path(s)"; + } + uses te-types:generic-path-properties { + augment "path-properties" { + description + "additional path properties returned by path + computation."; + uses te-types:te-bandwidth; + leaf disjointness-type { + type te-types:te-path-disjointness; + config false; + description + "The type of resource disjointness. + When reported for a primary path, it represents the + minimum level of disjointness of all the secondary + paths. When reported for a secondary path, it + represents the disjointness of the secondary path."; + } + } + } + } + } + container computed-path-error-infos { + config false; + description + "Path computation information container."; + list computed-path-error-info { + description + "List of path computation info entries."; + leaf error-description { + type string; + description + "Textual representation of the error that occurred + during path computation."; + } + leaf error-timestamp { + type yang:date-and-time; + description + "Timestamp of last path computation attempt."; + } + leaf error-reason { + type identityref { + base te-types:path-computation-error-reason; + } + description + "Reason for the path computation error."; + } + } + } + } + + grouping protection-restoration-properties { + description + "Protection and restoration parameters."; + container protection { + description + "Protection parameters."; + leaf protection-type { + type identityref { + base te-types:lsp-protection-type; + } + default "te-types:lsp-protection-unprotected"; + description + "LSP protection type."; + } + leaf protection-reversion-disable { + type boolean; + default "false"; + description + "Disable protection reversion to working path."; + } + leaf hold-off-time { + type uint32; + units "milli-seconds"; + description + "The time between the declaration of an SF or SD condition + and the initialization of the protection switching + algorithm."; + reference + "RFC4427"; + } + leaf wait-to-revert { + type uint16; + units "seconds"; + description + "Time to wait before attempting LSP reversion."; + reference + "RFC4427"; + } + leaf aps-signal-id { + type uint8 { + range "1..255"; + } + default "1"; + description + "The APS signal number used to reference the traffic of + this tunnel. The default value for normal traffic is 1. + The default value for extra-traffic is 255. 
If not + specified, non-default values can be assigned by the + server, if and only if, the server controls both + endpoints."; + reference + "ITU_G.808.1"; + } + } + container restoration { + description + "Restoration parameters."; + leaf restoration-type { + type identityref { + base te-types:lsp-restoration-type; + } + description + "LSP restoration type."; + } + leaf restoration-scheme { + type identityref { + base te-types:restoration-scheme-type; + } + description + "LSP restoration scheme."; + } + leaf restoration-reversion-disable { + type boolean; + default "false"; + description + "Disable restoration reversion to working path."; + } + leaf hold-off-time { + type uint32; + units "milli-seconds"; + description + "The time between the declaration of an SF or SD condition + and the initialization of the protection switching + algorithm."; + reference + "RFC4427"; + } + leaf wait-to-restore { + type uint16; + units "seconds"; + description + "Time to wait before attempting LSP restoration."; + reference + "RFC4427"; + } + leaf wait-to-revert { + type uint16; + units "seconds"; + description + "Time to wait before attempting LSP reversion."; + reference + "RFC4427"; + } + } + } + + grouping tunnel-associations-properties { + description + "TE tunnel association grouping."; + container association-objects { + description + "TE tunnel associations."; + list association-object { + key "association-key"; + unique "type id source/id source/type"; + description + "List of association base objects."; + reference + "RFC4872"; + leaf association-key { + type string; + description + "Association key used to identify a specific + association in the list"; + } + leaf type { + type identityref { + base te-types:association-type; + } + description + "Association type."; + reference + "RFC4872"; + } + leaf id { + type uint16; + description + "Association identifier."; + reference + "RFC4872"; + } + container source { + uses te-types:te-generic-node-id; + description + "Association source."; + reference + "RFC4872"; + } + } + list association-object-extended { + key "association-key"; + unique + "type id source/id source/type global-source extended-id"; + description + "List of extended association objects."; + reference + "RFC6780"; + leaf association-key { + type string; + description + "Association key used to identify a specific + association in the list"; + } + leaf type { + type identityref { + base te-types:association-type; + } + description + "Association type."; + reference + "RFC4872, RFC6780"; + } + leaf id { + type uint16; + description + "Association identifier."; + reference + "RFC4872, RFC6780"; + } + container source { + uses te-types:te-generic-node-id; + description + "Association source."; + reference + "RFC4872, RFC6780"; + } + leaf global-source { + type uint32; + description + "Association global source."; + reference + "RFC6780"; + } + leaf extended-id { + type yang:hex-string; + description + "Association extended identifier."; + reference + "RFC6780"; + } + } + } + } + + grouping tunnel-end-point { + description + "Common grouping used to specify the tunnel source and + destination end-points."; + leaf node-id { + type nw:node-id; + description + "The TE tunnel end-point node identifier"; + } + leaf te-node-id { + type te-types:te-node-id; + description + "The TE tunnel end-point TE node identifier"; + } + leaf tunnel-tp-id { + when "../node-id or ../te-node-id" { + description + "The TE tunnel termination point identifier is local to + a node"; + } + type binary; + 
description + "The TE tunnel end-point TE tunnel termination point + identifier"; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping tunnel-common-attributes { + description + "Common grouping to define the TE tunnel parameters"; + container source { + description + "TE tunnel source end-point."; + uses tunnel-end-point; + } + container destination { + description + "TE tunnel destination end-point."; + uses tunnel-end-point; + } + leaf bidirectional { + type boolean; + default "false"; + description + "Indicates a bidirectional tunnel"; + } + } + + /* This grouping is re-used in path-computation rpc */ + grouping tunnel-hierarchy-properties { + description + "A grouping for TE tunnel hierarchy information."; + container hierarchy { + description + "Container for TE hierarchy related information."; + container dependency-tunnels { + description + "List of tunnels that this tunnel can be potentially + dependent on."; + list dependency-tunnel { + key "name"; + description + "A tunnel entry that this tunnel can potentially depend + on."; + leaf name { + type tunnel-ref; + description + "Dependency tunnel name. The tunnel may not have been + instantiated yet."; + } + uses te-types:encoding-and-switching-type; + } + } + container hierarchical-link { + description + "Identifies a hierarchical link (in client layer) + that this tunnel is associated with. By default, the + topology of the hierarchical link is the same topology of + the tunnel;"; + reference + "RFC4206"; + leaf enable { + type boolean; + default "false"; + description + "Enables the hierarchical link properties supported by + this tunnel"; + } + leaf local-node-id { + type nw:node-id; + description + "The local node identifier."; + } + leaf local-te-node-id { + type te-types:te-node-id; + description + "The local TE node identifier."; + } + leaf local-link-tp-id { + type nt:tp-id; + description + "The local link termination point identifier."; + reference + "RFC8345"; + } + leaf local-te-link-tp-id { + type te-types:te-tp-id; + description + "The local TE link termination point identifier."; + } + leaf remote-node-id { + type nw:node-id; + description + "The remote node identifier."; + } + leaf remote-link-tp-id { + type nt:tp-id; + description + "The remote link termination point identifier."; + reference + "RFC8345"; + } + leaf remote-te-link-tp-id { + type te-types:te-tp-id; + description + "The remote TE link termination point identifier."; + } + leaf remote-te-node-id { + type te-types:te-node-id; + description + "Remote TE node identifier."; + } + leaf link-id { + type nt:link-id; + config false; + description + "A network topology assigned identifier to the link"; + reference + "RFC8345"; + } + leaf network-id { + type nw:network-id; + description + "The network topology identifier where the hierarchical + link supported by this TE tunnel is instantiated."; + } + uses te-types:te-topology-identifier { + description + "The TE topology identifier where the hierarchical link + supported by this TE tunnel is instantiated."; + } + } + } + } + + grouping path-constraints-common { + description + "Global named path constraints configuration + grouping."; + uses te-types:common-path-constraints-attributes; + uses te-types:generic-path-disjointness; + uses te-types:path-constraints-route-objects; + container path-in-segment { + presence "The end-to-end tunnel starts in a previous domain; + this tunnel is a segment in the current domain."; + description + "If an end-to-end tunnel crosses multiple domains 
using + the same technology, some additional constraints have to be + taken in consideration in each domain. + This TE tunnel segment is stitched to the upstream TE tunnel + segment."; + uses te-types:label-set-info; + } + container path-out-segment { + presence + "The end-to-end tunnel is not terminated in this domain; + this tunnel is a segment in the current domain."; + description + "If an end-to-end tunnel crosses multiple domains using + the same technology, some additional constraints have to be + taken in consideration in each domain. + This TE tunnel segment is stitched to the downstream TE + tunnel segment."; + uses te-types:label-set-info; + } + } + + /** + * TE container + */ + + container te { + description + "TE global container."; + leaf enable { + type boolean; + description + "Enables the TE component features."; + } + + /* TE Global Data */ + container globals { + description + "Globals TE system-wide configuration data container."; + container named-admin-groups { + description + "TE named admin groups container."; + list named-admin-group { + if-feature "te-types:extended-admin-groups"; + if-feature "te-types:named-extended-admin-groups"; + key "name"; + description + "List of named TE admin-groups."; + leaf name { + type string; + description + "A string name that uniquely identifies a TE + interface named admin-group."; + } + leaf bit-position { + type uint32; + description + "Bit position representing the administrative group."; + reference + "RFC3209 and RFC7308"; + } + + } + } + container named-srlgs { + description + "TE named SRLGs container."; + list named-srlg { + if-feature "te-types:named-srlg-groups"; + key "name"; + description + "A list of named SRLG groups."; + leaf name { + type string; + description + "A string name that uniquely identifies a TE + interface named SRLG."; + } + leaf value { + type te-types:srlg; + description + "An SRLG value."; + } + leaf cost { + type uint32; + description + "SRLG associated cost. 
Used during path to append + the path cost when traversing a link with this SRLG."; + } + } + } + container named-path-constraints { + description + "TE named path constraints container."; + list named-path-constraint { + if-feature "te-types:named-path-constraints"; + key "name"; + leaf name { + type string; + description + "A string name that uniquely identifies a + path constraint set."; + } + uses path-constraints-common; + description + "A list of named path constraints."; + } + } + } + + /* TE Tunnel Data */ + container tunnels { + description + "Tunnels TE configuration data container."; + list tunnel { + key "name"; + description + "The list of TE tunnels."; + leaf name { + type string; + description + "TE tunnel name."; + } + leaf alias { + type string; + description + "An alternate name of the TE tunnel that can be modified + anytime during its lifetime."; + } + leaf identifier { + type uint32; + description + "TE tunnel Identifier."; + reference + "RFC3209"; + } + leaf color { + type uint32; + description "The color associated with the TE tunnel."; + reference "RFC9012"; + } + leaf description { + type string; + default "None"; + description + "Textual description for this TE tunnel."; + } + leaf admin-state { + type identityref { + base te-types:tunnel-admin-state-type; + } + default "te-types:tunnel-admin-state-up"; + description + "TE tunnel administrative state."; + } + leaf operational-state { + type identityref { + base te-types:tunnel-state-type; + } + config false; + description + "TE tunnel operational state."; + } + uses te-types:encoding-and-switching-type; + uses tunnel-common-attributes; + container controller { + description + "Contains tunnel data relevant to external controller(s). + This target node may be augmented by external module(s), + for example, to add data for PCEP initiated and/or + delegated tunnels."; + leaf protocol-origin { + type identityref { + base te-types:protocol-origin-type; + } + description + "The protocol origin for instantiating the tunnel."; + } + leaf controller-entity-id { + type string; + description + "An identifier unique within the scope of visibility + that associated with the entity that controls the + tunnel."; + reference "RFC8232"; + } + } + leaf reoptimize-timer { + type uint16; + units "seconds"; + description + "Frequency of reoptimization of a traffic engineered + LSP."; + } + uses tunnel-associations-properties; + uses protection-restoration-properties; + uses te-types:tunnel-constraints; + uses tunnel-hierarchy-properties; + container primary-paths { + description + "The set of primary paths."; + reference "RFC4872"; + list primary-path { + key "name"; + description + "List of primary paths for this tunnel."; + leaf active { + type boolean; + config false; + description + "Indicates an active path that + has been selected from the primary paths list."; + } + uses path-common-properties; + uses path-forward-properties; + uses k-requested-paths; + uses path-compute-info; + uses path-state; + container primary-reverse-path { + when "../../../te:bidirectional = 'true'"; + description + "The reverse primary path properties."; + uses path-common-properties; + uses path-compute-info; + uses path-state; + container candidate-secondary-reverse-paths { + description + "The set of referenced candidate reverse secondary + paths from the full set of secondary reverse paths + which may be used for this primary path."; + list candidate-secondary-reverse-path { + key "secondary-reverse-path"; + ordered-by user; + description + "List of 
candidate secondary reverse path(s)"; + leaf secondary-reverse-path { + type leafref { + path "../../../../../../" + + "te:secondary-reverse-paths/" + + "te:secondary-reverse-path/te:name"; + } + description + "A reference to the secondary reverse path that + may be utilized when the containing primary + reverse path is in use."; + } + leaf active { + type boolean; + config false; + description + "Indicates an active path that has been + selected from the secondary reverse paths + list."; + } + } + } + } + container candidate-secondary-paths { + description + "The set of candidate secondary paths which may be + used for this primary path. When secondary paths are + specified in the list the path of the secondary LSP + in use must be restricted to those paths + referenced. + The priority of the secondary paths is specified + within the list. Higher priority values are less + preferred - that is to say that a path with priority + 0 is the most preferred path. In the case that the + list is empty, any secondary path may be + utilised when the current primary path is in use."; + list candidate-secondary-path { + key "secondary-path"; + ordered-by user; + description + "List of candidate secondary paths for this + tunnel."; + leaf secondary-path { + type leafref { + path "../../../../../te:secondary-paths/" + + "te:secondary-path/te:name"; + } + description + "A reference to the secondary path that may be + utilised when the containing primary path is + in use."; + } + leaf active { + type boolean; + config false; + description + "Indicates an active path that has been selected + from the candidate secondary paths."; + } + } + } + } + } + container secondary-paths { + description + "The set of secondary paths."; + reference "RFC4872"; + list secondary-path { + key "name"; + description + "List of secondary paths for this tunnel."; + uses path-common-properties; + leaf preference { + type uint8 { + range "1..255"; + } + default "1"; + description + "Specifies a preference for this path. The lower the + number higher the preference."; + } + leaf secondary-reverse-path { + type leafref { + path "../../../" + + "te:secondary-reverse-paths/" + + "te:secondary-reverse-path/te:name"; + } + description + "A reference to the reverse secondary path when + co-routed with the secondary path."; + } + uses path-compute-info; + uses protection-restoration-properties; + uses path-state; + } + } + container secondary-reverse-paths { + description + "The set of secondary reverse paths."; + list secondary-reverse-path { + key "name"; + description + "List of secondary paths for this tunnel."; + uses path-common-properties; + leaf preference { + type uint8 { + range "1..255"; + } + default "1"; + description + "Specifies a preference for this path. The lower the + number higher the preference. 
Paths that have the + same preference will be activated together."; + } + uses path-compute-info; + uses protection-restoration-properties; + uses path-state; + } + } + action tunnel-action { + description + "Action commands to manipulate the TE tunnel state."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + input { + leaf action-type { + type identityref { + base te-types:tunnel-action-type; + } + description + "The action to be invoked on the TE tunnel."; + } + } + output { + leaf action-result { + type identityref { + base te-types:te-action-result; + } + description + "The result of the tunnel action operation."; + } + } + } + action protection-external-commands { + description + "Actions to manipulate the protection external + commands of the TE tunnel."; + reference + "RFC 4427: Recovery (Protection and Restoration) + Terminology for Generalized Multi-Protocol Label + Switching (GMPLS)"; + input { + leaf protection-external-command { + type identityref { + base te-types:protection-external-commands; + } + description + "Protection external command."; + } + leaf protection-group-ingress-node { + type boolean; + default "true"; + description + "When 'true', indicates that the action is + applied on ingress node. + By default, the action applies to the ingress node + only."; + } + leaf protection-group-egress-node { + type boolean; + default "false"; + description + "When set to 'true', indicates that the action is + applied on egress node. + By default, the action applies to the ingress node + only."; + } + leaf path-name { + type string; + description + "The name of the path that the external command + applies to."; + } + leaf path-type { + type te-types:path-type; + description + "The type of the path that the external command + applies to."; + } + leaf traffic-type { + type enumeration { + enum normal-traffic { + description + "The manual-switch or forced-switch command + applies to the normal traffic (this Tunnel)."; + } + enum null-traffic { + description + "The manual-switch or forced-switch command + applies to the null traffic."; + } + enum extra-traffic { + description + "The manual-switch or forced-switch command + applies to the extra traffic (the extra-traffic + Tunnel sharing protection bandwidth with this + Tunnel)."; + } + } + description + "Indicates whether the manual-switch or forced-switch + commands applies to the normal traffic, the null + traffic or the extra-traffic."; + reference + "RFC4427"; + } + leaf extra-traffic-tunnel-ref { + type tunnel-ref; + description + "In case there are multiple extra-traffic tunnels + sharing protection bandwidth with this Tunnel + (m:n protection), represents which extra-traffic + Tunnel the manual-switch or forced-switch to + extra-traffic command applies to."; + } + } + } + } + } + + /* TE LSPs Data */ + container lsps { + config false; + description + "TE LSPs state container."; + list lsp { + key "tunnel-name lsp-id node"; + unique "source destination tunnel-id lsp-id " + + "extended-tunnel-id"; + description + "List of LSPs associated with the tunnel."; + leaf tunnel-name { + type string; + description "The TE tunnel name."; + } + leaf lsp-id { + type uint16; + description + "Identifier used in the SENDER_TEMPLATE and the + FILTER_SPEC that can be changed to allow a sender to + share resources with itself."; + reference + "RFC3209"; + } + leaf node { + type te-types:te-node-id; + description + "The node where the TE LSP state resides on."; + } + leaf source { + type te-types:te-node-id; 
+ description + "Tunnel sender address extracted from + SENDER_TEMPLATE object."; + reference + "RFC3209"; + } + leaf destination { + type te-types:te-node-id; + description + "The tunnel endpoint address."; + reference + "RFC3209"; + } + leaf tunnel-id { + type uint16; + description + "The tunnel identifier that remains + constant over the life of the tunnel."; + reference + "RFC3209"; + } + leaf extended-tunnel-id { + type yang:dotted-quad; + description + "The LSP Extended Tunnel ID."; + reference + "RFC3209"; + } + leaf operational-state { + type identityref { + base te-types:lsp-state-type; + } + description + "The LSP operational state."; + } + leaf signaling-type { + type identityref { + base te-types:path-signaling-type; + } + description + "The signaling protocol used to set up this LSP."; + } + leaf origin-type { + type enumeration { + enum ingress { + description + "Origin ingress."; + } + enum egress { + description + "Origin egress."; + } + enum transit { + description + "Origin transit."; + } + } + description + "The origin of the LSP relative to the location of the + local switch in the path."; + } + leaf lsp-resource-status { + type enumeration { + enum primary { + description + "A primary LSP is a fully established LSP for which + the resource allocation has been committed at the + data plane."; + } + enum secondary { + description + "A secondary LSP is an LSP that has been provisioned + in the control plane only; e.g. resource allocation + has not been committed at the data plane."; + } + } + description + "LSP resource allocation state."; + reference + "RFC4872, section 4.2.1"; + } + leaf lockout-of-normal { + type boolean; + description + "When set to 'true', it represents a lockout of normal + traffic external command. When set to 'false', it + represents a clear lockout of normal traffic external + command. The lockout of normal traffic command applies + to this Tunnel."; + reference + "RFC4427"; + } + leaf freeze { + type boolean; + description + "When set to 'true', it represents a freeze external + command. When set to 'false', it represents a clear + freeze external command. The freeze command applies to + all the Tunnels which are sharing the protection + resources with this Tunnel."; + reference + "RFC4427"; + } + leaf lsp-protection-role { + type enumeration { + enum working { + description + "A working LSP must be a primary LSP whilst a + protecting LSP can be either a primary or a + secondary LSP. Also, known as protected LSPs when + working LSPs are associated with protecting LSPs."; + } + enum protecting { + description + "A secondary LSP is an LSP that has been provisioned + in the control plane only; e.g. resource allocation + has not been committed at the data plane."; + } + } + description + "LSP role type."; + reference + "RFC4872, section 4.2.1"; + } + leaf lsp-protection-state { + type identityref { + base te-types:lsp-protection-state; + } + config false; + description + "The reported protection state controlling which + tunnel is using the resources of the protecting LSP."; + } + leaf protection-group-ingress-node-id { + type te-types:te-node-id; + description + "Indicates the te-node-id of the protection group + ingress node when the APS state represents an external + command (LoP, SF, MS) applied to it or a WTR timer + running on it. If the external command is not applied to + the ingress node or the WTR timer is not running on it, + this attribute is not specified. 
A value 0.0.0.0 is used + when the te-node-id of the protection group ingress node + is unknown (e.g., because the ingress node is outside + the scope of control of the server)"; + } + leaf protection-group-egress-node-id { + type te-types:te-node-id; + description + "Indicates the te-node-id of the protection group egress + node when the APS state represents an external command + (LoP, SF, MS) applied to it or a WTR timer running on + it. If the external command is not applied to the + ingress node or the WTR timer is not running on it, this + attribute is not specified. A value 0.0.0.0 is used when + the te-node-id of the protection group ingress node is + unknown (e.g., because the ingress node is outside the + scope of control of the server)"; + } + container lsp-actual-route-information { + description + "RSVP recorded route object information."; + list lsp-actual-route-information { + when "../../origin-type = 'ingress'" { + description + "Applicable on ingress LSPs only."; + } + key "index"; + description + "Record route list entry."; + uses te-types:record-route-state; + } + } + } + } + } + + /* TE Tunnel RPCs/execution Data */ + + rpc tunnels-path-compute { + description + "This RPC is a generic API whose + input and output parameters are expected to be provided by + augments to this module."; + reference + "RFC 4655: A Path Computation Element (PCE)-Based + Architecture."; + input { + container path-compute-info { + /* + * An external path compute module may augment this + * target. + */ + description + "RPC input information."; + } + } + output { + container path-compute-result { + /* + * An external path compute module may augment this + * target. + */ + description + "RPC output information."; + } + } + } + + rpc tunnels-actions { + description + "RPC that manipulates the state of a TE tunnel."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels, + Section 2.5"; + input { + container tunnel-info { + description + "TE tunnel information."; + choice filter-type { + mandatory true; + description + "Filter choice."; + case all-tunnels { + leaf all { + type empty; + mandatory true; + description + "When present, applies the action on all TE + tunnels."; + } + } + case one-tunnel { + leaf tunnel { + type tunnel-ref; + description + "Apply action on the specific TE tunnel."; + } + } + } + } + container action-info { + description + "TE tunnel action information."; + leaf action { + type identityref { + base te-types:tunnel-action-type; + } + description + "The action type."; + } + leaf disruptive { + when "derived-from-or-self(../action, " + + "'te-types:tunnel-action-reoptimize')"; + type empty; + description + "When present, specifies whether or not the + reoptimization + action is allowed to be disruptive."; + } + } + } + output { + leaf action-result { + type identityref { + base te-types:te-action-result; + } + description + "The result of the tunnel action operation."; + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang new file mode 100644 index 000000000..ba3820b72 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/draft-layer1-types/ietf-layer1-types.yang @@ -0,0 +1,1361 @@ +module ietf-layer1-types { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-layer1-types"; + prefix "l1-types"; + + import ietf-routing-types { + prefix rt-types; + reference + "RFC 8294: Common 
YANG Data Types for the Routing Area"; + } + + organization + "IETF CCAMP Working Group"; + contact + "WG Web: + WG List: + + Editor: Haomian Zheng + + + Editor: Italo Busi + "; + + description + "This module defines Layer 1 YANG types. The model fully conforms + to the Network Management Datastore Architecture (NMDA). + + Copyright (c) 2024 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices. + + The key words 'MUST', 'MUST NOT', 'REQUIRED', 'SHALL', 'SHALL + NOT', 'SHOULD', 'SHOULD NOT', 'RECOMMENDED', 'NOT RECOMMENDED', + 'MAY', and 'OPTIONAL' in this document are to be interpreted as + described in BCP 14 (RFC 2119) (RFC 8174) when, and only when, + they appear in all capitals, as shown here."; + + revision "2024-02-22" { + description + "Initial Version"; + reference + "RFC XXXX: A YANG Data Model for Layer 1 Types"; + // RFC Editor: replace RFC XXXX with actual RFC number, + // update date information and remove this note. + } + + /* + * Identities + */ + + identity tributary-slot-granularity { + description + "Tributary Slot Granularity (TSG)."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity tsg-1.25G { + base tributary-slot-granularity; + description + "1.25G tributary slot granularity."; + } + + identity tsg-2.5G { + base tributary-slot-granularity; + description + "2.5G tributary slot granularity."; + } + + identity tsg-5G { + base tributary-slot-granularity; + description + "5G tributary slot granularity."; + } + + identity odu-type { + description + "Base identity from which specific Optical Data Unit (ODU) + type is derived."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU0 { + base odu-type; + description + "ODU0 type (1.24Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU1 { + base odu-type; + description + "ODU1 type (2.49Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU2 { + base odu-type; + description + "ODU2 type (10.03Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU2e { + base odu-type; + description + "ODU2e type (10.39Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU3 { + base odu-type; + description + "ODU3 type (40.31Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control 
of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODU4 { + base odu-type; + description + "ODU4 type (104.79Gb/s)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ODUflex { + base odu-type; + description + "ODUflex type (flexible bit rate, not resizable). + + It could be used for any type of ODUflex, including + ODUflex(CBR), ODUflex(GFP), ODUflex(GFP,n,k), ODUflex(IMP,s), + ODUflex(IMP) and ODUflex(FlexE-aware)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + identity ODUflex-resizable { + base odu-type; + description + "ODUflex protocol (flexible bit rate, resizable). + + It could be used only for ODUflex(GFP,n,k)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity protocol { + description + "Base identity from which specific protocol is derived."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity Ethernet { + base protocol; + description + "Ethernet protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity Fibre-Channel { + base protocol; + description + "Fibre-Channel (FC) protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity SDH { + base protocol; + description + "SDH protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity SONET { + base protocol; + description + "SONET protocol."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity client-signal { + description + "Base identity from which specific Constant Bit Rate (CBR) + client signal is derived"; + } + + identity coding-func { + description + "Base identity from which specific coding function + is derived."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-1Gb { + base client-signal; + description + "Client signal type of 1GbE."; + reference + "IEEE 802.3-2018, Clause 36: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-10Gb-LAN { + base client-signal; + description + "Client signal type of ETH-10Gb-LAN (10.3 Gb/s)."; + reference + "IEEE 802.3-2018, Clause 49: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-10Gb-WAN { + base client-signal; + description + "Client signal type of ETH-10Gb-WAN (9.95 Gb/s)."; + reference + "IEEE 802.3-2018, Clause 50: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-40Gb { + base client-signal; + description + "Client signal type of 40GbE."; + reference + "IEEE 802.3-2018, Clause 82: 
IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity ETH-100Gb { + base client-signal; + description + "Client signal type of 100GbE."; + reference + "IEEE 802.3-2018, Clause 82: IEEE Standard for Ethernet + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + + identity STM-1 { + base client-signal; + base coding-func; + description + "Client signal type of STM-1; + STM-1 G.707 (N=1) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-4 { + base client-signal; + base coding-func; + description + "Client signal type of STM-4; + STM-4 G.707 (N=4) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-16 { + base client-signal; + base coding-func; + description + "Client signal type of STM-16; + STM-16 G.707 (N=16) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-64 { + base client-signal; + base coding-func; + description + "Client signal type of STM-64; + STM-64 G.707 (N=64) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity STM-256 { + base client-signal; + base coding-func; + description + "Client signal type of STM-256; + STM-256 G.707 (N=256) coding function."; + reference + "ITU-T G.707 v7.0 (01/2007): Network node interface for the + synchronous digital hierarchy (SDH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-3 { + base client-signal; + base coding-func; + description + "Client signal type of OC3; + OC-3 GR-253-CORE (N=3) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-12 { + base client-signal; + base coding-func; + description + "Client signal type of OC12; + OC-12 
GR-253-CORE (N=12) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-48 { + base client-signal; + base coding-func; + description + "Client signal type of OC48; + OC-48 GR-253-CORE (N=48) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-192 { + base client-signal; + base coding-func; + description + "Client signal type of OC192; + OC-192 GR-253-CORE (N=192) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity OC-768 { + base client-signal; + base coding-func; + description + "Client signal type of OC768; + OC-768 GR-253-CORE (N=768) coding function."; + reference + "ANSI T1.105-2001: Synchronous Optical Network (SONET) + Basic Description including Multiplex Structure, Rates, + and Formats + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-100 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-100; + FC-100 FC-FS-2 (1.0625 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-200 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-200; + FC-200 FC-FS-2 (2.125 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-400 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-400; + FC-400 FC-FS-2 (4.250 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-800 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-800; + FC-800 FC-FS-2 (8.500 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-1200 { + base client-signal; + base coding-func; + description + "Client signal 
type of Fibre Channel FC-1200; + FC-1200 FC-10GFC (10.51875 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-1600 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-1600; + FC-1600 FC-FS-3 (14.025 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FC-3200 { + base client-signal; + base coding-func; + description + "Client signal type of Fibre Channel FC-3200; + FC-3200 FC-FS-4 (28.05 Gb/s) coding function."; + reference + "ANSI INCITS 230-1994 R1999): Information Technology - + Fibre Channel - Physical and Signaling Interface (FC-PH) + + RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks + + ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN) + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-1000X { + base coding-func; + description + "1000BASE-X PCS clause 36 coding function."; + reference + "IEEE 802.3-2018, Clause 36: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-10GW { + base coding-func; + description + "IEEE 802.3-2018, Clause 50: IEEE Standard for Ethernet + + 10GBASE-W (WAN PHY) PCS clause 49 and WIS clause 50 + coding function."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-10GR { + base coding-func; + description + "10GBASE-R (LAN PHY) PCS clause 49 coding function."; + reference + "IEEE 802.3-2018, Clause 49: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-40GR { + base coding-func; + description + "40GBASE-R PCS clause 82 coding function."; + reference + "IEEE 802.3-2018, Clause 82: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ETH-100GR { + base coding-func; + description + "100GBASE-R PCS clause 82 coding function."; + reference + "IEEE 802.3-2018, Clause 82: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity optical-interface-func { + description + "Base identity from which optical-interface-function + is derived."; + reference + "MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity SX-PMD-1000 { + base optical-interface-func; + description + "SX-PMD-clause-38 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 38: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LX-PMD-1000 { + base optical-interface-func; + description + "LX-PMD-clause-38 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 38: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LX10-PMD-1000 { + base optical-interface-func; + description + 
"LX10-PMD-clause-59 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 59: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity BX10-PMD-1000 { + base optical-interface-func; + description + "BX10-PMD-clause-59 Optical Interface function for + 1000BASE-X PCS-36."; + reference + "IEEE 802.3-2018, Clause 59: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LW-PMD-10G { + base optical-interface-func; + description + "LW-PMD-clause-52 Optical Interface function for + 10GBASE-W PCS-49-WIS-50."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity EW-PMD-10G { + base optical-interface-func; + description + "EW-PMD-clause-52 Optical Interface function for + 10GBASE-W PCS-49-WIS-50."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LR-PMD-10G { + base optical-interface-func; + description + "LR-PMD-clause-52 Optical Interface function for + 10GBASE-R PCS-49."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ER-PMD-10G { + base optical-interface-func; + description + "ER-PMD-clause-52 Optical Interface function for + 10GBASE-R PCS-49."; + reference + "IEEE 802.3-2018, Clause 52: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LR4-PMD-40G { + base optical-interface-func; + description + "LR4-PMD-clause-87 Optical Interface function for + 40GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 87: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity ER4-PMD-40G { + base optical-interface-func; + description + "ER4-PMD-clause-87 Optical Interface function for + 40GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 87: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity FR-PMD-40G { + base optical-interface-func; + description + "FR-PMD-clause-89 Optical Interface function for + 40GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 89: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + identity LR4-PMD-100G { + base optical-interface-func; + description + "LR4-PMD-clause-88 Optical Interface function for + 100GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 88: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + identity ER4-PMD-100G { + base optical-interface-func; + description + "ER4-PMD-clause-88 Optical Interface function for + 100GBASE-R PCS-82."; + reference + "IEEE 802.3-2018, Clause 88: IEEE Standard for Ethernet + + MEF63: Subscriber Layer 1 Service Attributes"; + } + + /* + * Typedefs + */ + + typedef otn-tpn { + type uint16 { + range "1..4095"; + } + description + "Tributary Port Number (TPN) for OTN."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks."; + } + + typedef otn-ts { + type uint16 { + range "1..4095"; + } + description + "Tributary Slot (TS) for OTN."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of Evolving + G.709 Optical Transport Networks."; + } + + typedef otn-label-range-type { + type enumeration { + enum trib-slot { + description + "Defines a range of OTN tributary slots (TS)."; + } + enum 
trib-port { + description + "Defines a range of OTN tributary ports (TPN)."; + } + } + description + "Defines the type of OTN label range: TS or TPN."; + } + + typedef gfp-k { + type enumeration { + enum 2 { + description + "The ODU2.ts rate (1,249,177.230 kbit/s) is used + to compute the rate of an ODUflex(GFP,n,2)."; + } + enum 3 { + description + "The ODU3.ts rate (1,254,470.354 kbit/s) is used + to compute the rate of an ODUflex(GFP,n,3)."; + } + enum 4 { + description + "The ODU4.ts rate (1,301,467.133 kbit/s) is used + to compute the rate of an ODUflex(GFP,n,4)."; + } + } + description + "The ODUk.ts used to compute the rate of an ODUflex(GFP,n,k)."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-8 and L.7: Interfaces for + the Optical Transport Network (OTN)"; + } + + typedef flexe-client-rate { + type union { + type uint16; + type enumeration { + enum "10G" { + description + "Represents a 10G FlexE Client signal (s=2)."; + } + enum "40G" { + description + "Represents a 40G FlexE Client signal (s=8)."; + } + } + } + description + "The FlexE Client signal rate (s x 5,156,250.000 kbit/s) + used to compute the rate of an ODUflex(IMP, s). + + Valid values for s are s=2 (10G), s=4 (40G) and + s=5 x n (n x 25G). + + In the first two cases an enumeration value + (either 10G or 40G) is used, while in the latter case + the value of n is used."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-2: Interfaces for the + Optical Transport Network (OTN)"; + } + + typedef odtu-flex-type { + type enumeration { + enum "2" { + description + "The ODTU2.ts ODTU type."; + } + enum "3" { + description + "The ODTU3.ts ODTU type."; + } + enum "4" { + description + "The ODTU4.ts ODTU type."; + } + enum "Cn" { + description + "The ODTUCn.ts ODTU type."; + } + } + description + "The type of Optical Data Tributary Unit (ODTU), + whose nominal bitrate is used to compute the number of + Tributary Slots (TS) required by an ODUflex LSP, according to + the (19-1a) and (20-1a) formulas defined in G.709."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-7, clause 19.6 and + clause 20.5: Interfaces for the Optical Transport + Network (OTN)"; + } + + typedef bandwidth-scientific-notation { + type string { + pattern + '0(\.0?)?([eE](\+)?0?)?|' + + '[1-9](\.[0-9]{0,6})?[eE](\+)?(9[0-6]|[1-8][0-9]|0?[0-9])?'; + } + units "bps"; + description + "Bandwidth values, expressed using the scientific notation + in bits per second. + + The encoding format is the external decimal-significant + character sequences specified in IEEE 754 and ISO/IEC 9899:1999 + for 32-bit decimal floating-point numbers: + (-1)**(S) * 10**(Exponent) * (Significant), + where Significant uses 7 digits. + + An implementation for this representation MAY use decimal32 + or binary32. The range of the Exponent is from -95 to +96 + for decimal32, and from -38 to +38 for binary32. + As a bandwidth value, the format is restricted to be + normalized, non-negative, and non-fraction: + n.dddddde{+}dd, N.DDDDDDE{+}DD, 0e0 or 0E0, + where 'd' and 'D' are decimal digits; 'n' and 'N' are + non-zero decimal digits; 'e' and 'E' indicate a power of ten. 
+ Some examples are 0e0, 1e10, and 9.953e9."; + reference + "IEEE Std 754-2001: IEEE Standard for Floating-Point + Arithmetic + + ISO/IEC 9899:1999: Information technology - Programming + Languages - C"; + } + + /* + * Groupings + */ + + grouping otn-link-bandwidth { + description + "Bandwidth attributes for OTN links."; + container otn-bandwidth { + description + "Bandwidth attributes for OTN links."; + list odulist { + key "odu-type"; + description + "OTN bandwidth definition"; + leaf odu-type { + type identityref { + base odu-type; + } + description "ODU type"; + } + leaf number { + type uint16; + description "Number of ODUs."; + } + leaf ts-number { + when 'derived-from-or-self(../odu-type,"ODUflex") or + derived-from-or-self(../odu-type, + "ODUflex-resizable")' { + description + "Applicable when odu-type is ODUflex or + ODUflex-resizable."; + } + type uint16 { + range "1..4095"; + } + description + "The number of Tributary Slots (TS) that + could be used by all the ODUflex LSPs."; + } + } + } + } + + grouping otn-path-bandwidth { + description + "Bandwidth attributes for OTN paths."; + container otn-bandwidth { + description + "Bandwidth attributes for OTN paths."; + leaf odu-type { + type identityref { + base odu-type; + } + description "ODU type"; + } + choice oduflex-type { + when 'derived-from-or-self(./odu-type,"ODUflex") or + derived-from-or-self(./odu-type, + "ODUflex-resizable")' { + description + "Applicable when odu-type is ODUflex or + ODUflex-resizable."; + } + description + "Types of ODUflex used to compute the ODUflex + nominal bit rate."; + reference + "ITU-T G.709 v6.0 (06/2020), Table 7-2: Interfaces for the + Optical Transport Network (OTN)"; + case generic { + leaf nominal-bit-rate { + type union { + type l1-types:bandwidth-scientific-notation; + type rt-types:bandwidth-ieee-float32; + } + mandatory true; + description + "Nominal ODUflex bit rate."; + } + } + case cbr { + leaf client-type { + type identityref { + base client-signal; + } + mandatory true; + description + "The type of Constant Bit Rate (CBR) client signal + of an ODUflex(CBR)."; + } + } + case gfp-n-k { + leaf gfp-n { + type uint8 { + range "1..80"; + } + mandatory true; + description + "The value of n for an ODUflex(GFP,n,k)."; + reference + "ITU-T G.709 v6.0 (06/2020), Tables 7-8 and L.7: + Interfaces for the Optical Transport Network (OTN)"; + } + leaf gfp-k { + type gfp-k; + description + "The value of k for an ODUflex(GFP,n,k). 
+ + If omitted, it is calculated from the value of gfp-n + as described in Table 7-8 of G.709."; + reference + "ITU-T G.709 v6.0 (06/2020), Tables 7-8 and L.7: + Interfaces for the Optical Transport Network (OTN)"; + } + } + case flexe-client { + leaf flexe-client { + type flexe-client-rate; + mandatory true; + description + "The rate of the FlexE-client for an ODUflex(IMP,s)."; + } + } + case flexe-aware { + leaf flexe-aware-n { + type uint16; + mandatory true; + description + "The rate of FlexE-aware client signal + for ODUflex(FlexE-aware)"; + } + } + case packet { + leaf opuflex-payload-rate { + type union { + type l1-types:bandwidth-scientific-notation; + type rt-types:bandwidth-ieee-float32; + } + mandatory true; + description + "Either the GFP-F encapsulated packet client nominal + bit rate for an ODUflex(GFP) or the 64b/66b encoded + packet client nominal bit rate for an ODUflex(IMP)."; + } + } + } + } + } + + grouping otn-max-path-bandwidth { + description + "Maximum bandwidth attributes for OTN paths."; + container otn-bandwidth { + description + "Maximum bandwidth attributes for OTN paths."; + leaf odu-type { + type identityref { + base odu-type; + } + description "ODU type."; + } + leaf max-ts-number { + when 'derived-from-or-self(../odu-type,"ODUflex") or + derived-from-or-self(../odu-type, + "ODUflex-resizable")' { + description + "Applicable when odu-type is ODUflex or + ODUflex-resizable."; + } + type uint16 { + range "1..4095"; + } + description + "The maximum number of Tributary Slots (TS) that could be + used by an ODUflex LSP."; + } + } + } + + grouping otn-label-range-info { + description + "Label range information for OTN. + + This grouping SHOULD be used together with the + otn-label-start-end and otn-label-step groupings to provide + OTN technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + container otn-label-range { + description + "Label range information for OTN."; + leaf range-type { + type otn-label-range-type; + description "The type of range (e.g., TPN or TS) + to which the label range applies"; + } + leaf tsg { + type identityref { + base tributary-slot-granularity; + } + description + "Tributary slot granularity (TSG) to which the label range + applies. + + This leaf MUST be present when the range-type is TS. + + This leaf MAY be omitted when mapping an ODUk over an OTUk + Link. In this case the range-type is tpn, with only one + entry (ODUk), and the tpn range has only one value (1)."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + leaf-list odu-type-list { + type identityref { + base odu-type; + } + description + "List of ODU types to which the label range applies. + + An Empty odu-type-list means that the label range + applies to all the supported ODU types."; + } + leaf priority { + type uint8 { + range 0..7; + } + description + "Priority in Interface Switching Capability + Descriptor (ISCD)."; + reference + "RFC4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS)"; + } + } + } + + grouping otn-label-start-end { + description + "The OTN label-start or label-end used to specify an OTN label + range. + + This grouping is dependent on the range-type defined in the + otn-label-range-info grouping. 
+ + This grouping SHOULD be used together with the + otn-label-range-info and otn-label-step groupings to provide + OTN technology-specific label information to the models which + use the label-restriction-info grouping defined in the module + ietf-te-types."; + container otn-label { + description + "Label start or label end for OTN. + + It is either a TPN or a TS depending on the OTN label range + type specified in the 'range-type' leaf defined in the + otn-label-range-info grouping."; + leaf tpn { + when "../../../../otn-label-range/range-type = + 'trib-port'" { + description + "Valid only when range-type represented by + trib-port."; + } + type otn-tpn; + description + "Tributary Port Number (TPN)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + leaf ts { + when "../../../../otn-label-range/range-type = + 'trib-slot'" { + description + "Valid only when range-type represented by + trib-slot."; + } + type otn-ts; + description + "Tributary Slot (TS) number."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + } + } + + grouping otn-label-hop { + description "OTN Label"; + reference + "RFC7139, section 6: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + container otn-label { + description + "Label hop for OTN."; + leaf tpn { + type otn-tpn; + description + "Tributary Port Number (TPN)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + leaf tsg { + type identityref { + base tributary-slot-granularity; + } + description "Tributary Slot Granularity (TSG)."; + reference + "ITU-T G.709 v6.0 (06/2020): Interfaces for the Optical + Transport Network (OTN)"; + } + leaf ts-list { + type string { + pattern "([1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?" + + "(,[1-9][0-9]{0,3}(-[1-9][0-9]{0,3})?)*)"; + } + description + "A list of available Tributary Slots (TS) ranging + between 1 and 4095. If multiple values or + ranges are given, they all MUST be disjoint + and MUST be in ascending order. + For example 1-20,25,50-1000."; + reference + "RFC 7139: GMPLS Signaling Extensions for Control + of Evolving G.709 Optical Transport Networks"; + } + } + } + + grouping otn-label-step { + description + "Label step for OTN. + + This grouping is dependent on the range-type defined in the + otn-label-range-info grouping. + + This grouping SHOULD be used together with the + otn-label-range-info and otn-label-start-end groupings to + provide OTN technology-specific label information to the + models which use the label-restriction-info grouping defined + in the module ietf-te-types."; + container otn-label-step { + description + "Label step for OTN. 
+ + It is either a TPN or a TS depending on the OTN label range + type specified in the 'range-type' leaf defined in the + otn-label-range-info grouping."; + leaf tpn { + when "../../../otn-label-range/range-type = + 'trib-port'" { + description + "Valid only when range-type represented by + trib-port."; + } + type otn-tpn; + description + "Label step which represents possible increments for + Tributary Port Number (TPN)."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + leaf ts { + when "../../../otn-label-range/range-type = + 'trib-slot'" { + description + "Valid only when range-type represented by + trib-slot"; + } + type otn-ts; + description + "Label step which represents possible increments for + Tributary Slot (TS) number."; + reference + "RFC7139: GMPLS Signaling Extensions for Control of + Evolving G.709 Optical Transport Networks"; + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-inet-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-inet-types.yang new file mode 100644 index 000000000..a1ef0dfaa --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-inet-types.yang @@ -0,0 +1,458 @@ +module ietf-inet-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-inet-types"; + prefix "inet"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: David Kessens + + + WG Chair: Juergen Schoenwaelder + + + Editor: Juergen Schoenwaelder + "; + + description + "This module contains a collection of generally useful derived + YANG data types for Internet addresses and related things. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - ip-address-no-zone + - ipv4-address-no-zone + - ipv6-address-no-zone"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of types related to protocol fields ***/ + + typedef ip-version { + type enumeration { + enum unknown { + value "0"; + description + "An unknown or unspecified version of the Internet + protocol."; + } + enum ipv4 { + value "1"; + description + "The IPv4 protocol as defined in RFC 791."; + } + enum ipv6 { + value "2"; + description + "The IPv6 protocol as defined in RFC 2460."; + } + } + description + "This value represents the version of the IP protocol. 
+ + In the value set and its semantics, this type is equivalent + to the InetVersion textual convention of the SMIv2."; + reference + "RFC 791: Internet Protocol + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + typedef dscp { + type uint8 { + range "0..63"; + } + description + "The dscp type represents a Differentiated Services Code Point + that may be used for marking packets in a traffic stream. + In the value set and its semantics, this type is equivalent + to the Dscp textual convention of the SMIv2."; + reference + "RFC 3289: Management Information Base for the Differentiated + Services Architecture + RFC 2474: Definition of the Differentiated Services Field + (DS Field) in the IPv4 and IPv6 Headers + RFC 2780: IANA Allocation Guidelines For Values In + the Internet Protocol and Related Headers"; + } + + typedef ipv6-flow-label { + type uint32 { + range "0..1048575"; + } + description + "The ipv6-flow-label type represents the flow identifier or Flow + Label in an IPv6 packet header that may be used to + discriminate traffic flows. + + In the value set and its semantics, this type is equivalent + to the IPv6FlowLabel textual convention of the SMIv2."; + reference + "RFC 3595: Textual Conventions for IPv6 Flow Label + RFC 2460: Internet Protocol, Version 6 (IPv6) Specification"; + } + + typedef port-number { + type uint16 { + range "0..65535"; + } + description + "The port-number type represents a 16-bit port number of an + Internet transport-layer protocol such as UDP, TCP, DCCP, or + SCTP. Port numbers are assigned by IANA. A current list of + all assignments is available from . + + Note that the port number value zero is reserved by IANA. In + situations where the value zero does not make sense, it can + be excluded by subtyping the port-number type. + In the value set and its semantics, this type is equivalent + to the InetPortNumber textual convention of the SMIv2."; + reference + "RFC 768: User Datagram Protocol + RFC 793: Transmission Control Protocol + RFC 4960: Stream Control Transmission Protocol + RFC 4340: Datagram Congestion Control Protocol (DCCP) + RFC 4001: Textual Conventions for Internet Network Addresses"; + } + + /*** collection of types related to autonomous systems ***/ + + typedef as-number { + type uint32; + description + "The as-number type represents autonomous system numbers + which identify an Autonomous System (AS). An AS is a set + of routers under a single technical administration, using + an interior gateway protocol and common metrics to route + packets within the AS, and using an exterior gateway + protocol to route packets to other ASes. IANA maintains + the AS number space and has delegated large parts to the + regional registries. + + Autonomous system numbers were originally limited to 16 + bits. BGP extensions have enlarged the autonomous system + number space to 32 bits. This type therefore uses an uint32 + base type without a range restriction in order to support + a larger autonomous system number space. 
+ + In the value set and its semantics, this type is equivalent + to the InetAutonomousSystemNumber textual convention of + the SMIv2."; + reference + "RFC 1930: Guidelines for creation, selection, and registration + of an Autonomous System (AS) + RFC 4271: A Border Gateway Protocol 4 (BGP-4) + RFC 4001: Textual Conventions for Internet Network Addresses + RFC 6793: BGP Support for Four-Octet Autonomous System (AS) + Number Space"; + } + + /*** collection of types related to IP addresses and hostnames ***/ + + typedef ip-address { + type union { + type inet:ipv4-address; + type inet:ipv6-address; + } + description + "The ip-address type represents an IP address and is IP + version neutral. The format of the textual representation + implies the IP version. This type supports scoped addresses + by allowing zone identifiers in the address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '(%[\p{N}\p{L}]+)?'; + } + description + "The ipv4-address type represents an IPv4 address in + dotted-quad notation. The IPv4 address may include a zone + index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format for the zone index is the numerical + format"; + } + + typedef ipv6-address { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(%[\p{N}\p{L}]+)?'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(%.+)?'; + } + description + "The ipv6-address type represents an IPv6 address in full, + mixed, shortened, and shortened-mixed notation. The IPv6 + address may include a zone index, separated by a % sign. + + The zone index is used to disambiguate identical address + values. For link-local addresses, the zone index will + typically be the interface index number or the name of an + interface. If the zone index is not present, the default + zone of the device will be used. + + The canonical format of IPv6 addresses uses the textual + representation defined in Section 4 of RFC 5952. The + canonical format for the zone index is the numerical + format as described in Section 11.2 of RFC 4007."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-address-no-zone { + type union { + type inet:ipv4-address-no-zone; + type inet:ipv6-address-no-zone; + } + description + "The ip-address-no-zone type represents an IP address and is + IP version neutral. The format of the textual representation + implies the IP version. This type does not support scoped + addresses since it does not allow zone identifiers in the + address format."; + reference + "RFC 4007: IPv6 Scoped Address Architecture"; + } + + typedef ipv4-address-no-zone { + type inet:ipv4-address { + pattern '[0-9\.]*'; + } + description + "An IPv4 address without a zone index. 
This type, derived from + ipv4-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + } + + typedef ipv6-address-no-zone { + type inet:ipv6-address { + pattern '[0-9a-fA-F:\.]*'; + } + description + "An IPv6 address without a zone index. This type, derived from + ipv6-address, may be used in situations where the zone is + known from the context and hence no zone index is needed."; + reference + "RFC 4291: IP Version 6 Addressing Architecture + RFC 4007: IPv6 Scoped Address Architecture + RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + typedef ip-prefix { + type union { + type inet:ipv4-prefix; + type inet:ipv6-prefix; + } + description + "The ip-prefix type represents an IP prefix and is IP + version neutral. The format of the textual representations + implies the IP version."; + } + + typedef ipv4-prefix { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])' + + '/(([0-9])|([1-2][0-9])|(3[0-2]))'; + } + description + "The ipv4-prefix type represents an IPv4 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 32. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The canonical format of an IPv4 prefix has all bits of + the IPv4 address set to zero that are not part of the + IPv4 prefix."; + } + + typedef ipv6-prefix { + type string { + pattern '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))' + + '(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'; + pattern '(([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?)' + + '(/.+)'; + } + + description + "The ipv6-prefix type represents an IPv6 address prefix. + The prefix length is given by the number following the + slash character and must be less than or equal to 128. + + A prefix length value of n corresponds to an IP address + mask that has n contiguous 1-bits from the most + significant bit (MSB) and all other bits set to 0. + + The IPv6 address should have all bits that do not belong + to the prefix set to zero. + + The canonical format of an IPv6 prefix has all bits of + the IPv6 address set to zero that are not part of the + IPv6 prefix. Furthermore, the IPv6 address is represented + as defined in Section 4 of RFC 5952."; + reference + "RFC 5952: A Recommendation for IPv6 Address Text + Representation"; + } + + /*** collection of domain name and URI types ***/ + + typedef domain-name { + type string { + pattern + '((([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.)*' + + '([a-zA-Z0-9_]([a-zA-Z0-9\-_]){0,61})?[a-zA-Z0-9]\.?)' + + '|\.'; + length "1..253"; + } + description + "The domain-name type represents a DNS domain name. The + name SHOULD be fully qualified whenever possible. + + Internet domain names are only loosely specified. Section + 3.5 of RFC 1034 recommends a syntax (modified in Section + 2.1 of RFC 1123). The pattern above is intended to allow + for current practice in domain name use, and some possible + future expansion. It is designed to hold various types of + domain names, including names used for A or AAAA records + (host names) and other records, such as SRV records. 
Note + that Internet host names have a stricter syntax (described + in RFC 952) than the DNS recommendations in RFCs 1034 and + 1123, and that systems that want to store host names in + schema nodes using the domain-name type are recommended to + adhere to this stricter standard to ensure interoperability. + + The encoding of DNS names in the DNS protocol is limited + to 255 characters. Since the encoding consists of labels + prefixed by a length bytes and there is a trailing NULL + byte, only 253 characters can appear in the textual dotted + notation. + + The description clause of schema nodes using the domain-name + type MUST describe when and how these names are resolved to + IP addresses. Note that the resolution of a domain-name value + may require to query multiple DNS records (e.g., A for IPv4 + and AAAA for IPv6). The order of the resolution process and + which DNS record takes precedence can either be defined + explicitly or may depend on the configuration of the + resolver. + + Domain-name values use the US-ASCII encoding. Their canonical + format uses lowercase US-ASCII characters. Internationalized + domain names MUST be A-labels as per RFC 5890."; + reference + "RFC 952: DoD Internet Host Table Specification + RFC 1034: Domain Names - Concepts and Facilities + RFC 1123: Requirements for Internet Hosts -- Application + and Support + RFC 2782: A DNS RR for specifying the location of services + (DNS SRV) + RFC 5890: Internationalized Domain Names in Applications + (IDNA): Definitions and Document Framework"; + } + + typedef host { + type union { + type inet:ip-address; + type inet:domain-name; + } + description + "The host type represents either an IP address or a DNS + domain name."; + } + + typedef uri { + type string; + description + "The uri type represents a Uniform Resource Identifier + (URI) as defined by STD 66. + + Objects using the uri type MUST be in US-ASCII encoding, + and MUST be normalized as described by RFC 3986 Sections + 6.2.1, 6.2.2.1, and 6.2.2.2. All unnecessary + percent-encoding is removed, and all case-insensitive + characters are set to lowercase except for hexadecimal + digits, which are normalized to uppercase as described in + Section 6.2.2.1. + + The purpose of this normalization is to help provide + unique URIs. Note that this normalization is not + sufficient to provide uniqueness. Two URIs that are + textually distinct after this normalization may still be + equivalent. + + Objects using the uri type may restrict the schemes that + they permit. For example, 'data:' and 'urn:' schemes + might not be appropriate. + + A zero-length URI is not a valid URI. This can be used to + express 'URI absent' where required. 
+ + In the value set and its semantics, this type is equivalent + to the Uri SMIv2 textual convention defined in RFC 5017."; + reference + "RFC 3986: Uniform Resource Identifier (URI): Generic Syntax + RFC 3305: Report from the Joint W3C/IETF URI Planning Interest + Group: Uniform Resource Identifiers (URIs), URLs, + and Uniform Resource Names (URNs): Clarifications + and Recommendations + RFC 5017: MIB Textual Conventions for Uniform Resource + Identifiers (URIs)"; + } + +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-yang-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-yang-types.yang new file mode 100644 index 000000000..f6624fed8 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc6991/ietf-yang-types.yang @@ -0,0 +1,474 @@ +module ietf-yang-types { + + namespace "urn:ietf:params:xml:ns:yang:ietf-yang-types"; + prefix "yang"; + + organization + "IETF NETMOD (NETCONF Data Modeling Language) Working Group"; + + contact + "WG Web: + WG List: + + WG Chair: David Kessens + + + WG Chair: Juergen Schoenwaelder + + + Editor: Juergen Schoenwaelder + "; + + description + "This module contains a collection of generally useful derived + YANG data types. + + Copyright (c) 2013 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (http://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 6991; see + the RFC itself for full legal notices."; + + revision 2013-07-15 { + description + "This revision adds the following new data types: + - yang-identifier + - hex-string + - uuid + - dotted-quad"; + reference + "RFC 6991: Common YANG Data Types"; + } + + revision 2010-09-24 { + description + "Initial revision."; + reference + "RFC 6021: Common YANG Data Types"; + } + + /*** collection of counter and gauge types ***/ + + typedef counter32 { + type uint32; + description + "The counter32 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter32 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter32 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter32. + + In the value set and its semantics, this type is equivalent + to the Counter32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter32 { + type yang:counter32; + default "0"; + description + "The zero-based-counter32 type represents a counter32 + that has the defined 'initial' value zero. 
+ + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^32-1 (4294967295 decimal), when it + wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter32 textual convention of the SMIv2."; + reference + "RFC 4502: Remote Network Monitoring Management Information + Base Version 2"; + } + + typedef counter64 { + type uint64; + description + "The counter64 type represents a non-negative integer + that monotonically increases until it reaches a + maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Counters have no defined 'initial' value, and thus, a + single value of a counter has (in general) no information + content. Discontinuities in the monotonically increasing + value normally occur at re-initialization of the + management system, and at other times as specified in the + description of a schema node using this type. If such + other times can occur, for example, the creation of + a schema node of type counter64 at times other than + re-initialization, then a corresponding schema node + should be defined, with an appropriate type, to indicate + the last discontinuity. + + The counter64 type should not be used for configuration + schema nodes. A default statement SHOULD NOT be used in + combination with the type counter64. + + In the value set and its semantics, this type is equivalent + to the Counter64 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef zero-based-counter64 { + type yang:counter64; + default "0"; + description + "The zero-based-counter64 type represents a counter64 that + has the defined 'initial' value zero. + + A schema node of this type will be set to zero (0) on creation + and will thereafter increase monotonically until it reaches + a maximum value of 2^64-1 (18446744073709551615 decimal), + when it wraps around and starts increasing again from zero. + + Provided that an application discovers a new schema node + of this type within the minimum time to wrap, it can use the + 'initial' value as a delta. It is important for a management + station to be aware of this minimum time and the actual time + between polls, and to discard data if the actual time is too + long or there is no defined minimum time. + + In the value set and its semantics, this type is equivalent + to the ZeroBasedCounter64 textual convention of the SMIv2."; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + typedef gauge32 { + type uint32; + description + "The gauge32 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^32-1 (4294967295 decimal), and + the minimum value cannot be smaller than 0. 
The value of + a gauge32 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge32 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the Gauge32 type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef gauge64 { + type uint64; + description + "The gauge64 type represents a non-negative integer, which + may increase or decrease, but shall never exceed a maximum + value, nor fall below a minimum value. The maximum value + cannot be greater than 2^64-1 (18446744073709551615), and + the minimum value cannot be smaller than 0. The value of + a gauge64 has its maximum value whenever the information + being modeled is greater than or equal to its maximum + value, and has its minimum value whenever the information + being modeled is smaller than or equal to its minimum value. + If the information being modeled subsequently decreases + below (increases above) the maximum (minimum) value, the + gauge64 also decreases (increases). + + In the value set and its semantics, this type is equivalent + to the CounterBasedGauge64 SMIv2 textual convention defined + in RFC 2856"; + reference + "RFC 2856: Textual Conventions for Additional High Capacity + Data Types"; + } + + /*** collection of identifier-related types ***/ + + typedef object-identifier { + type string { + pattern '(([0-1](\.[1-3]?[0-9]))|(2\.(0|([1-9]\d*))))' + + '(\.(0|([1-9]\d*)))*'; + } + description + "The object-identifier type represents administratively + assigned names in a registration-hierarchical-name tree. + + Values of this type are denoted as a sequence of numerical + non-negative sub-identifier values. Each sub-identifier + value MUST NOT exceed 2^32-1 (4294967295). Sub-identifiers + are separated by single dots and without any intermediate + whitespace. + + The ASN.1 standard restricts the value space of the first + sub-identifier to 0, 1, or 2. Furthermore, the value space + of the second sub-identifier is restricted to the range + 0 to 39 if the first sub-identifier is 0 or 1. Finally, + the ASN.1 standard requires that an object identifier + has always at least two sub-identifiers. The pattern + captures these restrictions. + + Although the number of sub-identifiers is not limited, + module designers should realize that there may be + implementations that stick with the SMIv2 limit of 128 + sub-identifiers. + + This type is a superset of the SMIv2 OBJECT IDENTIFIER type + since it is not restricted to 128 sub-identifiers. Hence, + this type SHOULD NOT be used to represent the SMIv2 OBJECT + IDENTIFIER type; the object-identifier-128 type SHOULD be + used instead."; + reference + "ISO9834-1: Information technology -- Open Systems + Interconnection -- Procedures for the operation of OSI + Registration Authorities: General procedures and top + arcs of the ASN.1 Object Identifier tree"; + } + + typedef object-identifier-128 { + type object-identifier { + pattern '\d*(\.\d*){1,127}'; + } + description + "This type represents object-identifiers restricted to 128 + sub-identifiers. 
+ + In the value set and its semantics, this type is equivalent + to the OBJECT IDENTIFIER type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef yang-identifier { + type string { + length "1..max"; + pattern '[a-zA-Z_][a-zA-Z0-9\-_.]*'; + pattern '.|..|[^xX].*|.[^mM].*|..[^lL].*'; + } + description + "A YANG identifier string as defined by the 'identifier' + rule in Section 12 of RFC 6020. An identifier must + start with an alphabetic character or an underscore + followed by an arbitrary sequence of alphabetic or + numeric characters, underscores, hyphens, or dots. + + A YANG identifier MUST NOT start with any possible + combination of the lowercase or uppercase character + sequence 'xml'."; + reference + "RFC 6020: YANG - A Data Modeling Language for the Network + Configuration Protocol (NETCONF)"; + } + + /*** collection of types related to date and time***/ + + typedef date-and-time { + type string { + pattern '\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?' + + '(Z|[\+\-]\d{2}:\d{2})'; + } + description + "The date-and-time type is a profile of the ISO 8601 + standard for representation of dates and times using the + Gregorian calendar. The profile is defined by the + date-time production in Section 5.6 of RFC 3339. + + The date-and-time type is compatible with the dateTime XML + schema type with the following notable exceptions: + + (a) The date-and-time type does not allow negative years. + + (b) The date-and-time time-offset -00:00 indicates an unknown + time zone (see RFC 3339) while -00:00 and +00:00 and Z + all represent the same time zone in dateTime. + + (c) The canonical format (see below) of data-and-time values + differs from the canonical format used by the dateTime XML + schema type, which requires all times to be in UTC using + the time-offset 'Z'. + + This type is not equivalent to the DateAndTime textual + convention of the SMIv2 since RFC 3339 uses a different + separator between full-date and full-time and provides + higher resolution of time-secfrac. + + The canonical format for date-and-time values with a known time + zone uses a numeric time zone offset that is calculated using + the device's configured known offset to UTC time. A change of + the device's offset to UTC time will cause date-and-time values + to change accordingly. Such changes might happen periodically + in case a server follows automatically daylight saving time + (DST) time zone offset changes. The canonical format for + date-and-time values with an unknown time zone (usually + referring to the notion of local time) uses the time-offset + -00:00."; + reference + "RFC 3339: Date and Time on the Internet: Timestamps + RFC 2579: Textual Conventions for SMIv2 + XSD-TYPES: XML Schema Part 2: Datatypes Second Edition"; + } + + typedef timeticks { + type uint32; + description + "The timeticks type represents a non-negative integer that + represents the time, modulo 2^32 (4294967296 decimal), in + hundredths of a second between two epochs. When a schema + node is defined that uses this type, the description of + the schema node identifies both of the reference epochs. + + In the value set and its semantics, this type is equivalent + to the TimeTicks type of the SMIv2."; + reference + "RFC 2578: Structure of Management Information Version 2 + (SMIv2)"; + } + + typedef timestamp { + type yang:timeticks; + description + "The timestamp type represents the value of an associated + timeticks schema node at which a specific occurrence + happened. 
The specific occurrence must be defined in the + description of any schema node defined using this type. When + the specific occurrence occurred prior to the last time the + associated timeticks attribute was zero, then the timestamp + value is zero. Note that this requires all timestamp values + to be reset to zero when the value of the associated timeticks + attribute reaches 497+ days and wraps around to zero. + + The associated timeticks schema node must be specified + in the description of any schema node using this type. + + In the value set and its semantics, this type is equivalent + to the TimeStamp textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of generic address types ***/ + + typedef phys-address { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + + description + "Represents media- or physical-level addresses represented + as a sequence octets, each octet represented by two hexadecimal + numbers. Octets are separated by colons. The canonical + representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the PhysAddress textual convention of the SMIv2."; + reference + "RFC 2579: Textual Conventions for SMIv2"; + } + + typedef mac-address { + type string { + pattern '[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'; + } + description + "The mac-address type represents an IEEE 802 MAC address. + The canonical representation uses lowercase characters. + + In the value set and its semantics, this type is equivalent + to the MacAddress textual convention of the SMIv2."; + reference + "IEEE 802: IEEE Standard for Local and Metropolitan Area + Networks: Overview and Architecture + RFC 2579: Textual Conventions for SMIv2"; + } + + /*** collection of XML-specific types ***/ + + typedef xpath1.0 { + type string; + description + "This type represents an XPATH 1.0 expression. + + When a schema node is defined that uses this type, the + description of the schema node MUST specify the XPath + context in which the XPath expression is evaluated."; + reference + "XPATH: XML Path Language (XPath) Version 1.0"; + } + + /*** collection of string types ***/ + + typedef hex-string { + type string { + pattern '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'; + } + description + "A hexadecimal string with octets represented as hex digits + separated by colons. The canonical representation uses + lowercase characters."; + } + + typedef uuid { + type string { + pattern '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-' + + '[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'; + } + description + "A Universally Unique IDentifier in the string representation + defined in RFC 4122. The canonical representation uses + lowercase characters. + + The following is an example of a UUID in string representation: + f81d4fae-7dec-11d0-a765-00a0c91e6bf6 + "; + reference + "RFC 4122: A Universally Unique IDentifier (UUID) URN + Namespace"; + } + + typedef dotted-quad { + type string { + pattern + '(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}' + + '([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'; + } + description + "An unsigned 32-bit number expressed in the dotted-quad + notation, i.e., four octets written as decimal numbers + and separated with the '.' 
(full stop) character."; + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/iana-routing-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/iana-routing-types.yang new file mode 100644 index 000000000..e57ebd239 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/iana-routing-types.yang @@ -0,0 +1,471 @@ +module iana-routing-types { + namespace "urn:ietf:params:xml:ns:yang:iana-routing-types"; + prefix iana-rt-types; + + organization + "IANA"; + contact + "Internet Assigned Numbers Authority + + Postal: ICANN + 12025 Waterfront Drive, Suite 300 + Los Angeles, CA 90094-2536 + United States of America + Tel: +1 310 301 5800 + "; + + description + "This module contains a collection of YANG data types + considered defined by IANA and used for routing + protocols. + + Copyright (c) 2017 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8294; see + the RFC itself for full legal notices."; + + revision 2017-12-04 { + description "Initial revision."; + reference + "RFC 8294: Common YANG Data Types for the Routing Area. + Section 4."; + } + + /*** Collection of IANA types related to routing ***/ + /*** IANA Address Family enumeration ***/ + + typedef address-family { + type enumeration { + enum ipv4 { + value 1; + description + "IPv4 Address Family."; + } + + enum ipv6 { + value 2; + description + "IPv6 Address Family."; + } + + enum nsap { + value 3; + description + "OSI Network Service Access Point (NSAP) Address Family."; + } + + enum hdlc { + value 4; + description + "High-Level Data Link Control (HDLC) Address Family."; + } + + enum bbn1822 { + value 5; + description + "Bolt, Beranek, and Newman Report 1822 (BBN 1822) + Address Family."; + } + + enum ieee802 { + value 6; + description + "IEEE 802 Committee Address Family + (aka Media Access Control (MAC) address)."; + } + + enum e163 { + value 7; + description + "ITU-T E.163 Address Family."; + } + enum e164 { + value 8; + description + "ITU-T E.164 (Switched Multimegabit Data Service (SMDS), + Frame Relay, ATM) Address Family."; + } + + enum f69 { + value 9; + description + "ITU-T F.69 (Telex) Address Family."; + } + + enum x121 { + value 10; + description + "ITU-T X.121 (X.25, Frame Relay) Address Family."; + } + + enum ipx { + value 11; + description + "Novell Internetwork Packet Exchange (IPX) + Address Family."; + } + + enum appletalk { + value 12; + description + "Apple AppleTalk Address Family."; + } + + enum decnet-iv { + value 13; + description + "Digital Equipment DECnet Phase IV Address Family."; + } + + enum vines { + value 14; + description + "Banyan Vines Address Family."; + } + + enum e164-nsap { + value 15; + description + "ITU-T E.164 with NSAP sub-address Address Family."; + } + + enum dns { + value 16; + description + "Domain Name System (DNS) Address Family."; + } + + enum distinguished-name { + value 17; + description + "Distinguished Name Address Family."; + } + + enum as-num { + value 18; + description + "Autonomous System (AS) Number Address Family."; + } + + enum xtp-v4 { + value 19; + description + "Xpress Transport Protocol (XTP) over IPv4 + 
Address Family."; + } + + enum xtp-v6 { + value 20; + description + "XTP over IPv6 Address Family."; + } + + enum xtp-native { + value 21; + description + "XTP native mode Address Family."; + } + + enum fc-port { + value 22; + description + "Fibre Channel (FC) World-Wide Port Name Address Family."; + } + enum fc-node { + value 23; + description + "FC World-Wide Node Name Address Family."; + } + + enum gwid { + value 24; + description + "ATM Gateway Identifier (GWID) Number Address Family."; + } + + enum l2vpn { + value 25; + description + "Layer 2 VPN (L2VPN) Address Family."; + } + + enum mpls-tp-section-eid { + value 26; + description + "MPLS Transport Profile (MPLS-TP) Section Endpoint + Identifier Address Family."; + } + + enum mpls-tp-lsp-eid { + value 27; + description + "MPLS-TP Label Switched Path (LSP) Endpoint Identifier + Address Family."; + } + + enum mpls-tp-pwe-eid { + value 28; + description + "MPLS-TP Pseudowire Endpoint Identifier Address Family."; + } + + enum mt-v4 { + value 29; + description + "Multi-Topology IPv4 Address Family."; + } + + enum mt-v6 { + value 30; + description + "Multi-Topology IPv6 Address Family."; + } + + enum eigrp-common-sf { + value 16384; + description + "Enhanced Interior Gateway Routing Protocol (EIGRP) + Common Service Family Address Family."; + } + + enum eigrp-v4-sf { + value 16385; + description + "EIGRP IPv4 Service Family Address Family."; + } + + enum eigrp-v6-sf { + value 16386; + description + "EIGRP IPv6 Service Family Address Family."; + } + + enum lcaf { + value 16387; + description + "Locator/ID Separation Protocol (LISP) + Canonical Address Format (LCAF) Address Family."; + } + + enum bgp-ls { + value 16388; + description + "Border Gateway Protocol - Link State (BGP-LS) + Address Family."; + } + + enum mac-48 { + value 16389; + description + "IEEE 48-bit MAC Address Family."; + } + + enum mac-64 { + value 16390; + description + "IEEE 64-bit MAC Address Family."; + } + + enum trill-oui { + value 16391; + description + "Transparent Interconnection of Lots of Links (TRILL) + IEEE Organizationally Unique Identifier (OUI) + Address Family."; + } + + enum trill-mac-24 { + value 16392; + description + "TRILL final 3 octets of 48-bit MAC Address Family."; + } + + enum trill-mac-40 { + value 16393; + description + "TRILL final 5 octets of 64-bit MAC Address Family."; + } + + enum ipv6-64 { + value 16394; + description + "First 8 octets (64 bits) of IPv6 address + Address Family."; + } + + enum trill-rbridge-port-id { + value 16395; + description + "TRILL Routing Bridge (RBridge) Port ID Address Family."; + } + + enum trill-nickname { + value 16396; + description + "TRILL Nickname Address Family."; + } + } + + description + "Enumeration containing all the IANA-defined + Address Families."; + + } + + /*** Subsequent Address Family Identifiers (SAFIs) ***/ + /*** for multiprotocol BGP enumeration ***/ + + typedef bgp-safi { + type enumeration { + enum unicast-safi { + value 1; + description + "Unicast SAFI."; + } + + enum multicast-safi { + value 2; + description + "Multicast SAFI."; + } + + enum labeled-unicast-safi { + value 4; + description + "Labeled Unicast SAFI."; + } + + enum multicast-vpn-safi { + value 5; + description + "Multicast VPN SAFI."; + } + + enum pseudowire-safi { + value 6; + description + "Multi-segment Pseudowire VPN SAFI."; + } + + enum tunnel-encap-safi { + value 7; + description + "Tunnel Encap SAFI."; + } + + enum mcast-vpls-safi { + value 8; + description + "Multicast Virtual Private LAN Service (VPLS) SAFI."; + } 
+ + enum tunnel-safi { + value 64; + description + "Tunnel SAFI."; + } + + enum vpls-safi { + value 65; + description + "VPLS SAFI."; + } + + enum mdt-safi { + value 66; + description + "Multicast Distribution Tree (MDT) SAFI."; + } + + enum v4-over-v6-safi { + value 67; + description + "IPv4 over IPv6 SAFI."; + } + + enum v6-over-v4-safi { + value 68; + description + "IPv6 over IPv4 SAFI."; + } + + enum l1-vpn-auto-discovery-safi { + value 69; + description + "Layer 1 VPN Auto-Discovery SAFI."; + } + + enum evpn-safi { + value 70; + description + "Ethernet VPN (EVPN) SAFI."; + } + + enum bgp-ls-safi { + value 71; + description + "BGP-LS SAFI."; + } + + enum bgp-ls-vpn-safi { + value 72; + description + "BGP-LS VPN SAFI."; + } + + enum sr-te-safi { + value 73; + description + "Segment Routing - Traffic Engineering (SR-TE) SAFI."; + } + + enum labeled-vpn-safi { + value 128; + description + "MPLS Labeled VPN SAFI."; + } + + enum multicast-mpls-vpn-safi { + value 129; + description + "Multicast for BGP/MPLS IP VPN SAFI."; + } + + enum route-target-safi { + value 132; + description + "Route Target SAFI."; + } + + enum ipv4-flow-spec-safi { + value 133; + description + "IPv4 Flow Specification SAFI."; + } + + enum vpnv4-flow-spec-safi { + value 134; + description + "IPv4 VPN Flow Specification SAFI."; + } + + enum vpn-auto-discovery-safi { + value 140; + description + "VPN Auto-Discovery SAFI."; + } + } + description + "Enumeration for BGP SAFI."; + reference + "RFC 4760: Multiprotocol Extensions for BGP-4."; + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/ietf-routing-types.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/ietf-routing-types.yang new file mode 100644 index 000000000..65c83bc84 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8294/ietf-routing-types.yang @@ -0,0 +1,771 @@ +module ietf-routing-types { + namespace "urn:ietf:params:xml:ns:yang:ietf-routing-types"; + prefix rt-types; + + import ietf-yang-types { + prefix yang; + } + import ietf-inet-types { + prefix inet; + } + + organization + "IETF RTGWG - Routing Area Working Group"; + contact + "WG Web: + WG List: + + Editors: Xufeng Liu + + Yingzhen Qu + + Acee Lindem + + Christian Hopps + + Lou Berger + "; + + description + "This module contains a collection of YANG data types + considered generally useful for routing protocols. + + Copyright (c) 2017 IETF Trust and the persons + identified as authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8294; see + the RFC itself for full legal notices."; + revision 2017-12-04 { + description "Initial revision."; + reference + "RFC 8294: Common YANG Data Types for the Routing Area. 
+ Section 3."; + } + + /*** Identities related to MPLS/GMPLS ***/ + + identity mpls-label-special-purpose-value { + description + "Base identity for deriving identities describing + special-purpose Multiprotocol Label Switching (MPLS) label + values."; + reference + "RFC 7274: Allocating and Retiring Special-Purpose MPLS + Labels."; + } + + identity ipv4-explicit-null-label { + base mpls-label-special-purpose-value; + description + "This identity represents the IPv4 Explicit NULL Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity router-alert-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Router Alert Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity ipv6-explicit-null-label { + base mpls-label-special-purpose-value; + description + "This identity represents the IPv6 Explicit NULL Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity implicit-null-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Implicit NULL Label."; + reference + "RFC 3032: MPLS Label Stack Encoding. Section 2.1."; + } + + identity entropy-label-indicator { + base mpls-label-special-purpose-value; + description + "This identity represents the Entropy Label Indicator."; + reference + "RFC 6790: The Use of Entropy Labels in MPLS Forwarding. + Sections 3 and 10.1."; + } + + identity gal-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Generic Associated Channel + (G-ACh) Label (GAL)."; + reference + "RFC 5586: MPLS Generic Associated Channel. + Sections 4 and 10."; + } + + identity oam-alert-label { + base mpls-label-special-purpose-value; + description + "This identity represents the OAM Alert Label."; + reference + "RFC 3429: Assignment of the 'OAM Alert Label' for + Multiprotocol Label Switching Architecture (MPLS) + Operation and Maintenance (OAM) Functions. + Sections 3 and 6."; + } + + identity extension-label { + base mpls-label-special-purpose-value; + description + "This identity represents the Extension Label."; + reference + "RFC 7274: Allocating and Retiring Special-Purpose MPLS + Labels. Sections 3.1 and 5."; + } + + /*** Collection of types related to routing ***/ + + typedef router-id { + type yang:dotted-quad; + description + "A 32-bit number in the dotted-quad format assigned to each + router. 
This number uniquely identifies the router within + an Autonomous System."; + } + + /*** Collection of types related to VPNs ***/ + + typedef route-target { + type string { + pattern + '(0:(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0):(429496729[0-5]|' + + '42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|429496[0-6][0-9]{3}|' + + '42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|429[0-3][0-9]{6}|' + + '42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0))|' + + '(1:((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|' + + '1[0-9]{2}|2[0-4][0-9]|25[0-5])):(6553[0-5]|' + + '655[0-2][0-9]|' + + '65[0-4][0-9]{2}|6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(2:(429496729[0-5]|42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|' + + '429496[0-6][0-9]{3}|42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|' + + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0):' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(6(:[a-fA-F0-9]{2}){6})|' + + '(([3-57-9a-fA-F]|[1-9a-fA-F][0-9a-fA-F]{1,3}):' + + '[0-9a-fA-F]{1,12})'; + } + + description + "A Route Target is an 8-octet BGP extended community + initially identifying a set of sites in a BGP VPN + (RFC 4364). However, it has since taken on a more general + role in BGP route filtering. A Route Target consists of two + or three fields: a 2-octet Type field, an administrator + field, and, optionally, an assigned number field. + + According to the data formats for types 0, 1, 2, and 6 as + defined in RFC 4360, RFC 5668, and RFC 7432, the encoding + pattern is defined as: + + 0:2-octet-asn:4-octet-number + 1:4-octet-ipv4addr:2-octet-number + 2:4-octet-asn:2-octet-number + 6:6-octet-mac-address + + Additionally, a generic pattern is defined for future + Route Target types: + + 2-octet-other-hex-number:6-octet-hex-number + + Some valid examples are 0:100:100, 1:1.1.1.1:100, + 2:1234567890:203, and 6:26:00:08:92:78:00."; + reference + "RFC 4360: BGP Extended Communities Attribute. + RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 5668: 4-Octet AS Specific BGP Extended Community. + RFC 7432: BGP MPLS-Based Ethernet VPN."; + } + + typedef ipv6-route-target { + type string { + pattern + '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + pattern '((([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + } + description + "An IPv6 Route Target is a 20-octet BGP IPv6 Address + Specific Extended Community serving the same function + as a standard 8-octet Route Target, except that it only + allows an IPv6 address as the global administrator. + The format is . 
+ + Two valid examples are 2001:db8::1:6544 and + 2001:db8::5eb1:791:6b37:17958."; + reference + "RFC 5701: IPv6 Address Specific BGP Extended Community + Attribute."; + } + + typedef route-target-type { + type enumeration { + enum import { + value 0; + description + "The Route Target applies to route import."; + } + enum export { + value 1; + description + "The Route Target applies to route export."; + } + + enum both { + value 2; + description + "The Route Target applies to both route import and + route export."; + } + } + description + "Indicates the role a Route Target takes in route filtering."; + reference + "RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs)."; + } + + typedef route-distinguisher { + type string { + pattern + '(0:(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0):(429496729[0-5]|' + + '42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|429496[0-6][0-9]{3}|' + + '42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|429[0-3][0-9]{6}|' + + '42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0))|' + + '(1:((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|' + + '1[0-9]{2}|2[0-4][0-9]|25[0-5])):(6553[0-5]|' + + '655[0-2][0-9]|' + + '65[0-4][0-9]{2}|6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(2:(429496729[0-5]|42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|' + + '429496[0-6][0-9]{3}|42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|' + + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0):' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(6(:[a-fA-F0-9]{2}){6})|' + + '(([3-57-9a-fA-F]|[1-9a-fA-F][0-9a-fA-F]{1,3}):' + + '[0-9a-fA-F]{1,12})'; + } + + description + "A Route Distinguisher is an 8-octet value used to + distinguish routes from different BGP VPNs (RFC 4364). + A Route Distinguisher will have the same format as a + Route Target as per RFC 4360 and will consist of + two or three fields: a 2-octet Type field, an administrator + field, and, optionally, an assigned number field. + + According to the data formats for types 0, 1, 2, and 6 as + defined in RFC 4360, RFC 5668, and RFC 7432, the encoding + pattern is defined as: + + 0:2-octet-asn:4-octet-number + 1:4-octet-ipv4addr:2-octet-number + 2:4-octet-asn:2-octet-number + 6:6-octet-mac-address + + Additionally, a generic pattern is defined for future + route discriminator types: + + 2-octet-other-hex-number:6-octet-hex-number + + Some valid examples are 0:100:100, 1:1.1.1.1:100, + 2:1234567890:203, and 6:26:00:08:92:78:00."; + reference + "RFC 4360: BGP Extended Communities Attribute. + RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 5668: 4-Octet AS Specific BGP Extended Community. 
+ RFC 7432: BGP MPLS-Based Ethernet VPN."; + } + + typedef route-origin { + type string { + pattern + '(0:(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0):(429496729[0-5]|' + + '42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|429496[0-6][0-9]{3}|' + + '42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|429[0-3][0-9]{6}|' + + '42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0))|' + + '(1:((([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|' + + '25[0-5])\.){3}([0-9]|[1-9][0-9]|' + + '1[0-9]{2}|2[0-4][0-9]|25[0-5])):(6553[0-5]|' + + '655[0-2][0-9]|' + + '65[0-4][0-9]{2}|6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(2:(429496729[0-5]|42949672[0-8][0-9]|' + + '4294967[01][0-9]{2}|' + + '429496[0-6][0-9]{3}|42949[0-5][0-9]{4}|' + + '4294[0-8][0-9]{5}|' + + '429[0-3][0-9]{6}|42[0-8][0-9]{7}|4[01][0-9]{8}|' + + '[1-3][0-9]{9}|[1-9][0-9]{0,8}|0):' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0))|' + + '(6(:[a-fA-F0-9]{2}){6})|' + + '(([3-57-9a-fA-F]|[1-9a-fA-F][0-9a-fA-F]{1,3}):' + + '[0-9a-fA-F]{1,12})'; + } + description + "A Route Origin is an 8-octet BGP extended community + identifying the set of sites where the BGP route + originated (RFC 4364). A Route Origin will have the same + format as a Route Target as per RFC 4360 and will consist + of two or three fields: a 2-octet Type field, an + administrator field, and, optionally, an assigned number + field. + + According to the data formats for types 0, 1, 2, and 6 as + defined in RFC 4360, RFC 5668, and RFC 7432, the encoding + pattern is defined as: + + 0:2-octet-asn:4-octet-number + 1:4-octet-ipv4addr:2-octet-number + 2:4-octet-asn:2-octet-number + 6:6-octet-mac-address + Additionally, a generic pattern is defined for future + Route Origin types: + + 2-octet-other-hex-number:6-octet-hex-number + + Some valid examples are 0:100:100, 1:1.1.1.1:100, + 2:1234567890:203, and 6:26:00:08:92:78:00."; + reference + "RFC 4360: BGP Extended Communities Attribute. + RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 5668: 4-Octet AS Specific BGP Extended Community. + RFC 7432: BGP MPLS-Based Ethernet VPN."; + } + + typedef ipv6-route-origin { + type string { + pattern + '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}' + + '((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|' + + '(((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])\.){3}' + + '(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9]?[0-9])))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + pattern '((([^:]+:){6}(([^:]+:[^:]+)|(.*\..*)))|' + + '((([^:]+:)*[^:]+)?::(([^:]+:)*[^:]+)?))' + + ':' + + '(6553[0-5]|655[0-2][0-9]|65[0-4][0-9]{2}|' + + '6[0-4][0-9]{3}|' + + '[1-5][0-9]{4}|[1-9][0-9]{0,3}|0)'; + } + description + "An IPv6 Route Origin is a 20-octet BGP IPv6 Address + Specific Extended Community serving the same function + as a standard 8-octet route, except that it only allows + an IPv6 address as the global administrator. The format + is . 
+ + Two valid examples are 2001:db8::1:6544 and + 2001:db8::5eb1:791:6b37:17958."; + reference + "RFC 5701: IPv6 Address Specific BGP Extended Community + Attribute."; + } + + /*** Collection of types common to multicast ***/ + + typedef ipv4-multicast-group-address { + type inet:ipv4-address { + pattern '(2((2[4-9])|(3[0-9]))\.).*'; + } + description + "This type represents an IPv4 multicast group address, + which is in the range of 224.0.0.0 to 239.255.255.255."; + reference + "RFC 1112: Host Extensions for IP Multicasting."; + } + + typedef ipv6-multicast-group-address { + type inet:ipv6-address { + pattern '(([fF]{2}[0-9a-fA-F]{2}):).*'; + } + description + "This type represents an IPv6 multicast group address, + which is in the range of ff00::/8."; + reference + "RFC 4291: IP Version 6 Addressing Architecture. Section 2.7. + RFC 7346: IPv6 Multicast Address Scopes."; + } + + typedef ip-multicast-group-address { + type union { + type ipv4-multicast-group-address; + type ipv6-multicast-group-address; + } + description + "This type represents a version-neutral IP multicast group + address. The format of the textual representation implies + the IP version."; + } + + typedef ipv4-multicast-source-address { + type union { + type enumeration { + enum * { + description + "Any source address."; + } + } + type inet:ipv4-address; + } + description + "Multicast source IPv4 address type."; + } + + typedef ipv6-multicast-source-address { + type union { + type enumeration { + enum * { + description + "Any source address."; + } + } + type inet:ipv6-address; + } + description + "Multicast source IPv6 address type."; + } + + /*** Collection of types common to protocols ***/ + + typedef bandwidth-ieee-float32 { + type string { + pattern + '0[xX](0((\.0?)?[pP](\+)?0?|(\.0?))|' + + '1(\.([0-9a-fA-F]{0,5}[02468aAcCeE]?)?)?[pP](\+)?(12[0-7]|' + + '1[01][0-9]|0?[0-9]?[0-9])?)'; + } + description + "Bandwidth in IEEE 754 floating-point 32-bit binary format: + (-1)**(S) * 2**(Exponent-127) * (1 + Fraction), + where Exponent uses 8 bits and Fraction uses 23 bits. + The units are octets per second. + The encoding format is the external hexadecimal-significant + character sequences specified in IEEE 754 and ISO/IEC C99. + The format is restricted to be normalized, non-negative, and + non-fraction: 0x1.hhhhhhp{+}d, 0X1.HHHHHHP{+}D, or 0x0p0, + where 'h' and 'H' are hexadecimal digits and 'd' and 'D' are + integers in the range of [0..127]. + When six hexadecimal digits are used for 'hhhhhh' or + 'HHHHHH', the least significant digit must be an even + number. 'x' and 'X' indicate hexadecimal; 'p' and 'P' + indicate a power of two. Some examples are 0x0p0, 0x1p10, + and 0x1.abcde2p+20."; + reference + "IEEE Std 754-2008: IEEE Standard for Floating-Point + Arithmetic. 
+ ISO/IEC C99: Information technology - Programming + Languages - C."; + } + + typedef link-access-type { + type enumeration { + enum broadcast { + description + "Specify broadcast multi-access network."; + } + enum non-broadcast-multiaccess { + description + "Specify Non-Broadcast Multi-Access (NBMA) network."; + } + enum point-to-multipoint { + description + "Specify point-to-multipoint network."; + } + enum point-to-point { + description + "Specify point-to-point network."; + } + } + description + "Link access type."; + } + + typedef timer-multiplier { + type uint8; + description + "The number of timer value intervals that should be + interpreted as a failure."; + } + + typedef timer-value-seconds16 { + type union { + type uint16 { + range "1..65535"; + } + type enumeration { + enum infinity { + description + "The timer is set to infinity."; + } + enum not-set { + description + "The timer is not set."; + } + } + } + units "seconds"; + description + "Timer value type, in seconds (16-bit range)."; + } + + typedef timer-value-seconds32 { + type union { + type uint32 { + range "1..4294967295"; + } + type enumeration { + enum infinity { + description + "The timer is set to infinity."; + } + enum not-set { + description + "The timer is not set."; + } + } + } + units "seconds"; + description + "Timer value type, in seconds (32-bit range)."; + } + + typedef timer-value-milliseconds { + type union { + type uint32 { + range "1..4294967295"; + } + type enumeration { + enum infinity { + description + "The timer is set to infinity."; + } + enum not-set { + description + "The timer is not set."; + } + } + } + units "milliseconds"; + description + "Timer value type, in milliseconds."; + } + + typedef percentage { + type uint8 { + range "0..100"; + } + description + "Integer indicating a percentage value."; + } + + typedef timeticks64 { + type uint64; + description + "This type is based on the timeticks type defined in + RFC 6991, but with 64-bit width. It represents the time, + modulo 2^64, in hundredths of a second between two epochs."; + reference + "RFC 6991: Common YANG Data Types."; + } + + typedef uint24 { + type uint32 { + range "0..16777215"; + } + description + "24-bit unsigned integer."; + } + + /*** Collection of types related to MPLS/GMPLS ***/ + + typedef generalized-label { + type binary; + description + "Generalized Label. Nodes sending and receiving the + Generalized Label are aware of the link-specific + label context and type."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description. Section 3.2."; + } + + typedef mpls-label-special-purpose { + type identityref { + base mpls-label-special-purpose-value; + } + description + "This type represents the special-purpose MPLS label values."; + reference + "RFC 3032: MPLS Label Stack Encoding. + RFC 7274: Allocating and Retiring Special-Purpose MPLS + Labels."; + } + + typedef mpls-label-general-use { + type uint32 { + range "16..1048575"; + } + description + "The 20-bit label value in an MPLS label stack as specified + in RFC 3032. This label value does not include the + encodings of Traffic Class and TTL (Time to Live). 
+ The label range specified by this type is for general use, + with special-purpose MPLS label values excluded."; + reference + "RFC 3032: MPLS Label Stack Encoding."; + } + + typedef mpls-label { + type union { + type mpls-label-special-purpose; + type mpls-label-general-use; + } + description + "The 20-bit label value in an MPLS label stack as specified + in RFC 3032. This label value does not include the + encodings of Traffic Class and TTL."; + reference + "RFC 3032: MPLS Label Stack Encoding."; + } + + /*** Groupings **/ + + grouping mpls-label-stack { + description + "This grouping specifies an MPLS label stack. The label + stack is encoded as a list of label stack entries. The + list key is an identifier that indicates the relative + ordering of each entry, with the lowest-value identifier + corresponding to the top of the label stack."; + container mpls-label-stack { + description + "Container for a list of MPLS label stack entries."; + list entry { + key "id"; + description + "List of MPLS label stack entries."; + leaf id { + type uint8; + description + "Identifies the entry in a sequence of MPLS label + stack entries. An entry with a smaller identifier + value precedes an entry with a larger identifier + value in the label stack. The value of this ID has + no semantic meaning other than relative ordering + and referencing the entry."; + } + leaf label { + type rt-types:mpls-label; + description + "Label value."; + } + + leaf ttl { + type uint8; + description + "Time to Live (TTL)."; + reference + "RFC 3032: MPLS Label Stack Encoding."; + } + leaf traffic-class { + type uint8 { + range "0..7"; + } + description + "Traffic Class (TC)."; + reference + "RFC 5462: Multiprotocol Label Switching (MPLS) Label + Stack Entry: 'EXP' Field Renamed to 'Traffic Class' + Field."; + } + } + } + } + + grouping vpn-route-targets { + description + "A grouping that specifies Route Target import-export rules + used in BGP-enabled VPNs."; + reference + "RFC 4364: BGP/MPLS IP Virtual Private Networks (VPNs). + RFC 4664: Framework for Layer 2 Virtual Private Networks + (L2VPNs)."; + list vpn-target { + key "route-target"; + description + "List of Route Targets."; + leaf route-target { + type rt-types:route-target; + description + "Route Target value."; + } + leaf route-target-type { + type rt-types:route-target-type; + mandatory true; + description + "Import/export type of the Route Target."; + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8343/ietf-interfaces.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8343/ietf-interfaces.yang new file mode 100644 index 000000000..96d416753 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8343/ietf-interfaces.yang @@ -0,0 +1,1123 @@ +module ietf-interfaces { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-interfaces"; + prefix if; + + import ietf-yang-types { + prefix yang; + } + + organization + "IETF NETMOD (Network Modeling) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Martin Bjorklund + "; + + description + "This module contains a collection of YANG definitions for + managing network interfaces. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8343; see + the RFC itself for full legal notices."; + + revision 2018-02-20 { + description + "Updated to support NMDA."; + reference + "RFC 8343: A YANG Data Model for Interface Management"; + } + + revision 2014-05-08 { + description + "Initial revision."; + reference + "RFC 7223: A YANG Data Model for Interface Management"; + } + + /* + * Typedefs + */ + + typedef interface-ref { + type leafref { + path "/if:interfaces/if:interface/if:name"; + } + description + "This type is used by data models that need to reference + interfaces."; + } + + /* + * Identities + */ + + identity interface-type { + description + "Base identity from which specific interface types are + derived."; + } + + /* + * Features + */ + + feature arbitrary-names { + description + "This feature indicates that the device allows user-controlled + interfaces to be named arbitrarily."; + } + feature pre-provisioning { + description + "This feature indicates that the device supports + pre-provisioning of interface configuration, i.e., it is + possible to configure an interface whose physical interface + hardware is not present on the device."; + } + feature if-mib { + description + "This feature indicates that the device implements + the IF-MIB."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + + /* + * Data nodes + */ + + container interfaces { + description + "Interface parameters."; + + list interface { + key "name"; + + description + "The list of interfaces on the device. + + The status of an interface is available in this list in the + operational state. If the configuration of a + system-controlled interface cannot be used by the system + (e.g., the interface hardware present does not match the + interface type), then the configuration is not applied to + the system-controlled interface shown in the operational + state. If the configuration of a user-controlled interface + cannot be used by the system, the configured interface is + not instantiated in the operational state. + + System-controlled interfaces created by the system are + always present in this list in the operational state, + whether or not they are configured."; + + leaf name { + type string; + description + "The name of the interface. + + A device MAY restrict the allowed values for this leaf, + possibly depending on the type of the interface. + For system-controlled interfaces, this leaf is the + device-specific name of the interface. + + If a client tries to create configuration for a + system-controlled interface that is not present in the + operational state, the server MAY reject the request if + the implementation does not support pre-provisioning of + interfaces or if the name refers to an interface that can + never exist in the system. A Network Configuration + Protocol (NETCONF) server MUST reply with an rpc-error + with the error-tag 'invalid-value' in this case. + + If the device supports pre-provisioning of interface + configuration, the 'pre-provisioning' feature is + advertised. + + If the device allows arbitrarily named user-controlled + interfaces, the 'arbitrary-names' feature is advertised. 
+ + When a configured user-controlled interface is created by + the system, it is instantiated with the same name in the + operational state. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf description { + type string; + description + "A textual description of the interface. + + A server implementation MAY map this leaf to the ifAlias + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifAlias. The definition of + such a mechanism is outside the scope of this document. + + Since ifAlias is defined to be stored in non-volatile + storage, the MIB implementation MUST map ifAlias to the + value of 'description' in the persistently stored + configuration."; + reference + "RFC 2863: The Interfaces Group MIB - ifAlias"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + description + "The type of the interface. + + When an interface entry is created, a server MAY + initialize the type leaf with a valid value, e.g., if it + is possible to derive the type from the name of the + interface. + + If a client tries to set the type of an interface to a + value that can never be used by the system, e.g., if the + type is not supported or if the type does not match the + name of the interface, the server MUST reject the request. + A NETCONF server MUST reply with an rpc-error with the + error-tag 'invalid-value' in this case."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf enabled { + type boolean; + default "true"; + description + "This leaf contains the configured, desired state of the + interface. + + Systems that implement the IF-MIB use the value of this + leaf in the intended configuration to set + IF-MIB.ifAdminStatus to 'up' or 'down' after an ifEntry + has been initialized, as described in RFC 2863. + + Changes in this leaf in the intended configuration are + reflected in ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf link-up-down-trap-enable { + if-feature if-mib; + type enumeration { + enum enabled { + value 1; + description + "The device will generate linkUp/linkDown SNMP + notifications for this interface."; + } + enum disabled { + value 2; + description + "The device will not generate linkUp/linkDown SNMP + notifications for this interface."; + } + } + description + "Controls whether linkUp/linkDown SNMP notifications + should be generated for this interface. + + If this node is not configured, the value 'enabled' is + operationally used by the server for interfaces that do + not operate on top of any other interface (i.e., there are + no 'lower-layer-if' entries), and 'disabled' otherwise."; + reference + "RFC 2863: The Interfaces Group MIB - + ifLinkUpDownTrapEnable"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + config false; + mandatory true; + description + "The desired state of the interface. 
+ + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + config false; + mandatory true; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + config false; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + config false; + mandatory true; + description + "The ifIndex value for the ifEntry represented by this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + config false; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-ref; + config false; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-ref; + config false; + + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + config false; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + config false; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. One possible reason + for discarding such a packet could be to free up buffer + space. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + + } + } + + /* + * Legacy typedefs + */ + + typedef interface-state-ref { + type leafref { + path "/if:interfaces-state/if:interface/if:name"; + } + status deprecated; + description + "This type is used by data models that need to reference + the operationally present interfaces."; + } + + /* + * Legacy operational state data nodes + */ + + container interfaces-state { + config false; + status deprecated; + description + "Data nodes for the operational state of interfaces."; + + list interface { + key "name"; + status deprecated; + + description + "The list of interfaces on the device. + + System-controlled interfaces created by the system are + always present in this list, whether or not they are + configured."; + + leaf name { + type string; + status deprecated; + description + "The name of the interface. + + A server implementation MAY map this leaf to the ifName + MIB object. Such an implementation needs to use some + mechanism to handle the differences in size and characters + allowed between this leaf and ifName. The definition of + such a mechanism is outside the scope of this document."; + reference + "RFC 2863: The Interfaces Group MIB - ifName"; + } + + leaf type { + type identityref { + base interface-type; + } + mandatory true; + status deprecated; + description + "The type of the interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifType"; + } + + leaf admin-status { + if-feature if-mib; + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "Not ready to pass packets and not in some test mode."; + } + enum testing { + value 3; + description + "In some test mode."; + } + } + mandatory true; + status deprecated; + description + "The desired state of the interface. + + This leaf has the same read semantics as ifAdminStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifAdminStatus"; + } + + leaf oper-status { + type enumeration { + enum up { + value 1; + description + "Ready to pass packets."; + } + enum down { + value 2; + description + "The interface does not pass any packets."; + } + enum testing { + value 3; + description + "In some test mode. 
No operational packets can + be passed."; + } + enum unknown { + value 4; + description + "Status cannot be determined for some reason."; + } + enum dormant { + value 5; + description + "Waiting for some external event."; + } + enum not-present { + value 6; + description + "Some component (typically hardware) is missing."; + } + enum lower-layer-down { + value 7; + description + "Down due to state of lower-layer interface(s)."; + } + } + mandatory true; + status deprecated; + description + "The current operational state of the interface. + + This leaf has the same semantics as ifOperStatus."; + reference + "RFC 2863: The Interfaces Group MIB - ifOperStatus"; + } + + leaf last-change { + type yang:date-and-time; + status deprecated; + description + "The time the interface entered its current operational + state. If the current state was entered prior to the + last re-initialization of the local network management + subsystem, then this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifLastChange"; + } + + leaf if-index { + if-feature if-mib; + type int32 { + range "1..2147483647"; + } + mandatory true; + status deprecated; + description + "The ifIndex value for the ifEntry represented by this + interface."; + + reference + "RFC 2863: The Interfaces Group MIB - ifIndex"; + } + + leaf phys-address { + type yang:phys-address; + status deprecated; + description + "The interface's address at its protocol sub-layer. For + example, for an 802.x interface, this object normally + contains a Media Access Control (MAC) address. The + interface's media-specific modules must define the bit + and byte ordering and the format of the value of this + object. For interfaces that do not have such an address + (e.g., a serial line), this node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - ifPhysAddress"; + } + + leaf-list higher-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered on top of this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf-list lower-layer-if { + type interface-state-ref; + status deprecated; + description + "A list of references to interfaces layered underneath this + interface."; + reference + "RFC 2863: The Interfaces Group MIB - ifStackTable"; + } + + leaf speed { + type yang:gauge64; + units "bits/second"; + status deprecated; + description + "An estimate of the interface's current bandwidth in bits + per second. For interfaces that do not vary in + bandwidth or for those where no accurate estimation can + + be made, this node should contain the nominal bandwidth. + For interfaces that have no concept of bandwidth, this + node is not present."; + reference + "RFC 2863: The Interfaces Group MIB - + ifSpeed, ifHighSpeed"; + } + + container statistics { + status deprecated; + description + "A collection of interface-related statistics objects."; + + leaf discontinuity-time { + type yang:date-and-time; + mandatory true; + status deprecated; + description + "The time on the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + + leaf in-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets received on the interface, + including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInOctets"; + } + + leaf in-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were not addressed to a + multicast or broadcast address at this sub-layer. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCInUcastPkts"; + } + + leaf in-broadcast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a broadcast + address at this sub-layer. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInBroadcastPkts"; + } + + leaf in-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The number of packets, delivered by this sub-layer to a + higher (sub-)layer, that were addressed to a multicast + address at this sub-layer. For a MAC-layer protocol, + this includes both Group and Functional addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCInMulticastPkts"; + } + + leaf in-discards { + type yang:counter32; + status deprecated; + + description + "The number of inbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being deliverable to a higher-layer + protocol. One possible reason for discarding such a + packet could be to free up buffer space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInDiscards"; + } + + leaf in-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of inbound + packets that contained errors preventing them from being + deliverable to a higher-layer protocol. For character- + oriented or fixed-length interfaces, the number of + inbound transmission units that contained errors + preventing them from being deliverable to a higher-layer + protocol. 
+ + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInErrors"; + } + + leaf in-unknown-protos { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of packets + received via the interface that were discarded because + of an unknown or unsupported protocol. For + character-oriented or fixed-length interfaces that + support protocol multiplexing, the number of + transmission units received via the interface that were + discarded because of an unknown or unsupported protocol. + For any interface that does not support protocol + multiplexing, this counter is not present. + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifInUnknownProtos"; + } + + leaf out-octets { + type yang:counter64; + status deprecated; + description + "The total number of octets transmitted out of the + interface, including framing characters. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutOctets"; + } + + leaf out-unicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were not addressed + to a multicast or broadcast address at this sub-layer, + including those that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifHCOutUcastPkts"; + } + + leaf out-broadcast-pkts { + type yang:counter64; + status deprecated; + + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + broadcast address at this sub-layer, including those + that were discarded or not sent. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutBroadcastPkts"; + } + + leaf out-multicast-pkts { + type yang:counter64; + status deprecated; + description + "The total number of packets that higher-level protocols + requested be transmitted and that were addressed to a + multicast address at this sub-layer, including those + that were discarded or not sent. For a MAC-layer + protocol, this includes both Group and Functional + addresses. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - + ifHCOutMulticastPkts"; + } + + leaf out-discards { + type yang:counter32; + status deprecated; + description + "The number of outbound packets that were chosen to be + discarded even though no errors had been detected to + prevent their being transmitted. 
One possible reason + for discarding such a packet could be to free up buffer + space. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutDiscards"; + } + + leaf out-errors { + type yang:counter32; + status deprecated; + description + "For packet-oriented interfaces, the number of outbound + packets that could not be transmitted because of errors. + For character-oriented or fixed-length interfaces, the + number of outbound transmission units that could not be + transmitted because of errors. + + Discontinuities in the value of this counter can occur + at re-initialization of the management system and at + other times as indicated by the value of + 'discontinuity-time'."; + reference + "RFC 2863: The Interfaces Group MIB - ifOutErrors"; + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network-topology.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network-topology.yang new file mode 100644 index 000000000..df3685827 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network-topology.yang @@ -0,0 +1,294 @@ +module ietf-network-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network-topology"; + prefix nt; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Alexander Clemm + + + Editor: Jan Medved + + + Editor: Robert Varga + + + Editor: Nitin Bahadur + + + Editor: Hariharan Ananthakrishnan + + + Editor: Xufeng Liu + "; + + description + "This module defines a common base model for a network topology, + augmenting the base network data model with links to connect + nodes, as well as termination points to terminate links + on nodes. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8345; + see the RFC itself for full legal notices."; + + revision 2018-02-26 { + description + "Initial revision."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef link-id { + type inet:uri; + description + "An identifier for a link in a topology. The precise + structure of the link-id will be up to the implementation. + The identifier SHOULD be chosen such that the same link in a + real network topology will always be identified through the + same identifier, even if the data model is instantiated in + separate datastores. An implementation MAY choose to capture + semantics in the identifier -- for example, to indicate the + type of link and/or the type of topology of which the link is + a part."; + } + + typedef tp-id { + type inet:uri; + description + "An identifier for termination points on a node. 
The precise + structure of the tp-id will be up to the implementation. + The identifier SHOULD be chosen such that the same termination + point in a real network topology will always be identified + through the same identifier, even if the data model is + instantiated in separate datastores. An implementation MAY + choose to capture semantics in the identifier -- for example, + to indicate the type of termination point and/or the type of + node that contains the termination point."; + } + + grouping link-ref { + description + "This grouping can be used to reference a link in a specific + network. Although it is not used in this module, it is + defined here for the convenience of augmenting modules."; + leaf link-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nt:link/nt:link-id"; + require-instance false; + } + description + "A type for an absolute reference to a link instance. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nw:network-ref; + } + + grouping tp-ref { + description + "This grouping can be used to reference a termination point + in a specific node. Although it is not used in this module, + it is defined here for the convenience of augmenting + modules."; + leaf tp-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nw:node[nw:node-id=current()/../"+ + "node-ref]/nt:termination-point/nt:tp-id"; + require-instance false; + } + description + "A type for an absolute reference to a termination point. + (This type should not be used for relative references. + In such a case, a relative path should be used instead.)"; + } + uses nw:node-ref; + } + + augment "/nw:networks/nw:network" { + description + "Add links to the network data model."; + list link { + key "link-id"; + description + "A network link connects a local (source) node and + a remote (destination) node via a set of the respective + node's termination points. It is possible to have several + links between the same source and destination nodes. + Likewise, a link could potentially be re-homed between + termination points. Therefore, in order to ensure that we + would always know to distinguish between links, every link + is identified by a dedicated link identifier. Note that a + link models a point-to-point link, not a multipoint link."; + leaf link-id { + type link-id; + description + "The identifier of a link in the topology. + A link is specific to a topology to which it belongs."; + } + container source { + description + "This container holds the logical source of a particular + link."; + leaf source-node { + type leafref { + path "../../../nw:node/nw:node-id"; + require-instance false; + } + description + "Source node identifier. Must be in the same topology."; + } + leaf source-tp { + type leafref { + path "../../../nw:node[nw:node-id=current()/../"+ + "source-node]/termination-point/tp-id"; + require-instance false; + } + description + "This termination point is located within the source node + and terminates the link."; + } + } + + container destination { + description + "This container holds the logical destination of a + particular link."; + leaf dest-node { + type leafref { + path "../../../nw:node/nw:node-id"; + require-instance false; + } + description + "Destination node identifier. 
Must be in the same + network."; + } + leaf dest-tp { + type leafref { + path "../../../nw:node[nw:node-id=current()/../"+ + "dest-node]/termination-point/tp-id"; + require-instance false; + } + description + "This termination point is located within the + destination node and terminates the link."; + } + } + list supporting-link { + key "network-ref link-ref"; + description + "Identifies the link or links on which this link depends."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-network/nw:network-ref"; + require-instance false; + } + description + "This leaf identifies in which underlay topology + the supporting link is present."; + } + + leaf link-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/"+ + "../network-ref]/link/link-id"; + require-instance false; + } + description + "This leaf identifies a link that is a part + of this link's underlay. Reference loops in which + a link identifies itself as its underlay, either + directly or transitively, are not allowed."; + } + } + } + } + augment "/nw:networks/nw:network/nw:node" { + description + "Augments termination points that terminate links. + Termination points can ultimately be mapped to interfaces."; + list termination-point { + key "tp-id"; + description + "A termination point can terminate a link. + Depending on the type of topology, a termination point + could, for example, refer to a port or an interface."; + leaf tp-id { + type tp-id; + description + "Termination point identifier."; + } + list supporting-termination-point { + key "network-ref node-ref tp-ref"; + description + "This list identifies any termination points on which a + given termination point depends or onto which it maps. + Those termination points will themselves be contained + in a supporting node. This dependency information can be + inferred from the dependencies between links. Therefore, + this item is not separately configurable. Hence, no + corresponding constraint needs to be articulated. 
+ The corresponding information is simply provided by the + implementing system."; + + leaf network-ref { + type leafref { + path "../../../nw:supporting-node/nw:network-ref"; + require-instance false; + } + description + "This leaf identifies in which topology the + supporting termination point is present."; + } + leaf node-ref { + type leafref { + path "../../../nw:supporting-node/nw:node-ref"; + require-instance false; + } + description + "This leaf identifies in which node the supporting + termination point is present."; + } + leaf tp-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/"+ + "../network-ref]/nw:node[nw:node-id=current()/../"+ + "node-ref]/termination-point/tp-id"; + require-instance false; + } + description + "Reference to the underlay node (the underlay node must + be in a different topology)."; + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network.yang new file mode 100644 index 000000000..c67a3fa40 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8345/ietf-network.yang @@ -0,0 +1,192 @@ +module ietf-network { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-network"; + prefix nw; + + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + + contact + "WG Web: + WG List: + + Editor: Alexander Clemm + + + Editor: Jan Medved + + + Editor: Robert Varga + + + Editor: Nitin Bahadur + + + Editor: Hariharan Ananthakrishnan + + + Editor: Xufeng Liu + "; + description + "This module defines a common base data model for a collection + of nodes in a network. Node definitions are further used + in network topologies and inventories. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC 8345; + see the RFC itself for full legal notices."; + + revision 2018-02-26 { + description + "Initial revision."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + typedef node-id { + type inet:uri; + description + "Identifier for a node. The precise structure of the node-id + will be up to the implementation. For example, some + implementations MAY pick a URI that includes the network-id + as part of the path. The identifier SHOULD be chosen + such that the same node in a real network topology will + always be identified through the same identifier, even if + the data model is instantiated in separate datastores. An + implementation MAY choose to capture semantics in the + identifier -- for example, to indicate the type of node."; + } + + typedef network-id { + type inet:uri; + description + "Identifier for a network. The precise structure of the + network-id will be up to the implementation. The identifier + SHOULD be chosen such that the same network will always be + identified through the same identifier, even if the data model + is instantiated in separate datastores. 
An implementation MAY + choose to capture semantics in the identifier -- for example, + to indicate the type of network."; + } + + grouping network-ref { + description + "Contains the information necessary to reference a network -- + for example, an underlay network."; + leaf network-ref { + type leafref { + path "/nw:networks/nw:network/nw:network-id"; + require-instance false; + } + description + "Used to reference a network -- for example, an underlay + network."; + } + } + + grouping node-ref { + description + "Contains the information necessary to reference a node."; + leaf node-ref { + type leafref { + path "/nw:networks/nw:network[nw:network-id=current()/../"+ + "network-ref]/nw:node/nw:node-id"; + require-instance false; + } + description + "Used to reference a node. + Nodes are identified relative to the network that + contains them."; + } + uses network-ref; + } + + container networks { + description + "Serves as a top-level container for a list of networks."; + list network { + key "network-id"; + description + "Describes a network. + A network typically contains an inventory of nodes, + topological information (augmented through the + network-topology data model), and layering information."; + leaf network-id { + type network-id; + description + "Identifies a network."; + } + container network-types { + description + "Serves as an augmentation target. + The network type is indicated through corresponding + presence containers augmented into this container."; + } + list supporting-network { + key "network-ref"; + description + "An underlay network, used to represent layered network + topologies."; + leaf network-ref { + type leafref { + path "/nw:networks/nw:network/nw:network-id"; + require-instance false; + } + description + "References the underlay network."; + } + } + + list node { + key "node-id"; + description + "The inventory of nodes of this network."; + leaf node-id { + type node-id; + description + "Uniquely identifies a node within the containing + network."; + } + list supporting-node { + key "network-ref node-ref"; + description + "Represents another node that is in an underlay network + and that supports this node. 
Used to represent layering + structure."; + leaf network-ref { + type leafref { + path "../../../nw:supporting-network/nw:network-ref"; + require-instance false; + } + description + "References the underlay network of which the + underlay node is a part."; + } + leaf node-ref { + type leafref { + path "/nw:networks/nw:network/nw:node/nw:node-id"; + require-instance false; + } + description + "References the underlay node itself."; + } + } + } + } + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang new file mode 100644 index 000000000..56941fdca --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8346/ietf-l3-unicast-topology.yang @@ -0,0 +1,359 @@ +module ietf-l3-unicast-topology { + yang-version 1.1; + namespace + "urn:ietf:params:xml:ns:yang:ietf-l3-unicast-topology"; + prefix "l3t"; + import ietf-network { + prefix "nw"; + } + import ietf-network-topology { + prefix "nt"; + } + import ietf-inet-types { + prefix "inet"; + } + import ietf-routing-types { + prefix "rt-types"; + } + organization + "IETF I2RS (Interface to the Routing System) Working Group"; + contact + "WG Web: + WG List: + Editor: Alexander Clemm + + Editor: Jan Medved + + Editor: Robert Varga + + Editor: Xufeng Liu + + Editor: Nitin Bahadur + + Editor: Hariharan Ananthakrishnan + "; + description + "This module defines a model for Layer 3 Unicast + topologies. + + Copyright (c) 2018 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Simplified BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of + RFC 8346; see the RFC itself for full legal notices."; + revision "2018-02-26" { + description + "Initial revision."; + reference + "RFC 8346: A YANG Data Model for Layer 3 Topologies"; + } + + identity flag-identity { + description "Base type for flags"; + } + + typedef l3-event-type { + type enumeration { + enum "add" { + description + "A Layer 3 node, link, prefix, or termination point has + been added"; + } + enum "remove" { + description + "A Layer 3 node, link, prefix, or termination point has + been removed"; + } + enum "update" { + description + "A Layer 3 node, link, prefix, or termination point has + been updated"; + } + } + description "Layer 3 event type for notifications"; + } + + typedef prefix-flag-type { + type identityref { + base "flag-identity"; + } + description "Prefix flag attributes"; + } + + typedef node-flag-type { + type identityref { + base "flag-identity"; + } + description "Node flag attributes"; + } + + typedef link-flag-type { + type identityref { + base "flag-identity"; + } + description "Link flag attributes"; + } + + typedef l3-flag-type { + type identityref { + base "flag-identity"; + } + description "L3 flag attributes"; + } + + grouping l3-prefix-attributes { + description + "L3 prefix attributes"; + leaf prefix { + type inet:ip-prefix; + description + "IP prefix value"; + } + leaf metric { + type uint32; + description + "Prefix metric"; + } + leaf-list flag { + type prefix-flag-type; + description + "Prefix flags"; + } + } + grouping l3-unicast-topology-type { + description "Identifies the topology type to be L3 Unicast."; + container l3-unicast-topology { + presence "indicates L3 Unicast topology"; + description + "The presence of the container node indicates L3 Unicast + topology"; + } + } + grouping l3-topology-attributes { + description "Topology scope attributes"; + container l3-topology-attributes { + description "Contains topology attributes"; + leaf name { + type string; + description + "Name of the topology"; + } + leaf-list flag { + type l3-flag-type; + description + "Topology flags"; + } + } + } + grouping l3-node-attributes { + description "L3 node scope attributes"; + container l3-node-attributes { + description + "Contains node attributes"; + leaf name { + type inet:domain-name; + description + "Node name"; + } + leaf-list flag { + type node-flag-type; + description + "Node flags"; + } + leaf-list router-id { + type rt-types:router-id; + description + "Router-id for the node"; + } + list prefix { + key "prefix"; + description + "A list of prefixes along with their attributes"; + uses l3-prefix-attributes; + } + } + } + grouping l3-link-attributes { + description + "L3 link scope attributes"; + container l3-link-attributes { + description + "Contains link attributes"; + leaf name { + type string; + description + "Link Name"; + } + leaf-list flag { + type link-flag-type; + description + "Link flags"; + } + leaf metric1 { + type uint64; + description + "Link Metric 1"; + } + leaf metric2 { + type uint64; + description + "Link Metric 2"; + } + } + } + grouping l3-termination-point-attributes { + description "L3 termination point scope attributes"; + container l3-termination-point-attributes { + description + "Contains termination point attributes"; + choice termination-point-type { + description + "Indicates the termination point type"; + case ip { + leaf-list ip-address { + type inet:ip-address; + description + "IPv4 or IPv6 address."; + } + } + case unnumbered { + leaf unnumbered-id { + type 
uint32; + description + "Unnumbered interface identifier. + The identifier will correspond to the ifIndex value + of the interface, i.e., the ifIndex value of the + ifEntry that represents the interface in + implementations where the Interfaces Group MIB + (RFC 2863) is supported."; + reference + "RFC 2863: The Interfaces Group MIB"; + } + } + case interface-name { + leaf interface-name { + type string; + description + "Name of the interface. The name can (but does not + have to) correspond to an interface reference of a + containing node's interface, i.e., the path name of a + corresponding interface data node on the containing + node reminiscent of data type interface-ref defined + in RFC 8343. It should be noted that data type + interface-ref of RFC 8343 cannot be used directly, + + as this data type is used to reference an interface + in a datastore of a single node in the network, not + to uniquely reference interfaces across a network."; + reference + "RFC 8343: A YANG Data Model for Interface Management"; + } + } + } + } + } + augment "/nw:networks/nw:network/nw:network-types" { + description + "Introduces new network type for L3 Unicast topology"; + uses l3-unicast-topology-type; + } + augment "/nw:networks/nw:network" { + when "nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description + "L3 Unicast for the network as a whole"; + uses l3-topology-attributes; + } + augment "/nw:networks/nw:network/nw:node" { + when "../nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description + "L3 Unicast node-level attributes "; + uses l3-node-attributes; + } + augment "/nw:networks/nw:network/nt:link" { + when "../nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description + "Augments topology link attributes"; + uses l3-link-attributes; + } + augment "/nw:networks/nw:network/nw:node/" + +"nt:termination-point" { + when "../../nw:network-types/l3t:l3-unicast-topology" { + description + "Augmentation parameters apply only for networks with + L3 Unicast topology"; + } + description "Augments topology termination point configuration"; + uses l3-termination-point-attributes; + } + notification l3-node-event { + description + "Notification event for L3 node"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nw:node-ref; + uses l3-unicast-topology-type; + uses l3-node-attributes; + } + notification l3-link-event { + description + "Notification event for L3 link"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nt:link-ref; + uses l3-unicast-topology-type; + uses l3-link-attributes; + } + notification l3-prefix-event { + description + "Notification event for L3 prefix"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nw:node-ref; + uses l3-unicast-topology-type; + container prefix { + description + "Contains L3 prefix attributes"; + uses l3-prefix-attributes; + } + } + notification termination-point-event { + description + "Notification event for L3 termination point"; + leaf l3-event-type { + type l3-event-type; + description + "Event type"; + } + uses nt:tp-ref; + uses l3-unicast-topology-type; + uses l3-termination-point-attributes; + } +} \ No newline at end of file diff --git 
a/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8795/ietf-te-topology.yang b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8795/ietf-te-topology.yang new file mode 100644 index 000000000..41edbcd1f --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/rfc8795/ietf-te-topology.yang @@ -0,0 +1,1952 @@ +module ietf-te-topology { + yang-version 1.1; + namespace "urn:ietf:params:xml:ns:yang:ietf-te-topology"; + prefix tet; + + import ietf-yang-types { + prefix yang; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-inet-types { + prefix inet; + reference + "RFC 6991: Common YANG Data Types"; + } + import ietf-te-types { + prefix te-types; + reference + "RFC 8776: Common YANG Data Types for Traffic Engineering"; + } + import ietf-network { + prefix nw; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + import ietf-network-topology { + prefix nt; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + + organization + "IETF Traffic Engineering Architecture and Signaling (TEAS) + Working Group"; + contact + "WG Web: + WG List: + + Editor: Xufeng Liu + + + Editor: Igor Bryskin + + + Editor: Vishnu Pavan Beeram + + + Editor: Tarek Saad + + + Editor: Himanshu Shah + + + Editor: Oscar Gonzalez de Dios + "; + description + "This YANG module defines a TE topology model for representing, + retrieving, and manipulating technology-agnostic TE topologies. + + Copyright (c) 2020 IETF Trust and the persons identified as + authors of the code. All rights reserved. + + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject to + the license terms contained in, the Simplified BSD License set + forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). 
+ + This version of this YANG module is part of RFC 8795; see the + RFC itself for full legal notices."; + + revision 2020-08-06 { + description + "Initial revision."; + reference + "RFC 8795: YANG Data Model for Traffic Engineering (TE) + Topologies"; + } + + /* + * Features + */ + + feature nsrlg { + description + "This feature indicates that the system supports NSRLGs + (Non-Shared Risk Link Groups)."; + } + + feature te-topology-hierarchy { + description + "This feature indicates that the system allows an underlay + and/or overlay TE topology hierarchy."; + } + + feature template { + description + "This feature indicates that the system supports + template configuration."; + } + + /* + * Typedefs + */ + + typedef geographic-coordinate-degree { + type decimal64 { + fraction-digits 8; + } + description + "Decimal degree (DD) used to express latitude and longitude + geographic coordinates."; + } + // geographic-coordinate-degree + + typedef te-info-source { + type enumeration { + enum unknown { + description + "The source is unknown."; + } + enum locally-configured { + description + "Configured entity."; + } + enum ospfv2 { + description + "OSPFv2."; + } + enum ospfv3 { + description + "OSPFv3."; + } + enum isis { + description + "IS-IS."; + } + enum bgp-ls { + description + "BGP-LS."; + reference + "RFC 7752: North-Bound Distribution of Link-State and + Traffic Engineering (TE) Information Using BGP"; + } + enum system-processed { + description + "System-processed entity."; + } + enum other { + description + "Other source."; + } + } + description + "Describes the type of source that has provided the + related information, and the source's credibility."; + } + // te-info-source + + /* + * Groupings + */ + + grouping connectivity-matrix-entry-path-attributes { + description + "Attributes of a connectivity matrix entry."; + leaf is-allowed { + type boolean; + description + "'true' - switching is allowed; + 'false' - switching is disallowed."; + } + container underlay { + if-feature "te-topology-hierarchy"; + description + "Attributes of the TE link underlay."; + reference + "RFC 4206: Label Switched Paths (LSP) Hierarchy with + Generalized Multi-Protocol Label Switching (GMPLS) + Traffic Engineering (TE)"; + uses te-link-underlay-attributes; + } + uses te-types:generic-path-constraints; + uses te-types:generic-path-optimization; + uses te-types:generic-path-properties; + } + // connectivity-matrix-entry-path-attributes + + grouping geolocation-container { + description + "Contains a GPS location."; + container geolocation { + config false; + description + "Contains a GPS location."; + leaf altitude { + type int64; + units "millimeters"; + description + "Distance above sea level."; + } + leaf latitude { + type geographic-coordinate-degree { + range "-90..90"; + } + description + "Relative position north or south on the Earth's surface."; + } + leaf longitude { + type geographic-coordinate-degree { + range "-180..180"; + } + description + "Angular distance east or west on the Earth's surface."; + } + } + // geolocation + } + // geolocation-container + + grouping information-source-state-attributes { + description + "The attributes identifying the source that has provided the + related information, and the source's credibility."; + leaf credibility-preference { + type uint16; + description + "The preference value for calculating the Traffic + Engineering database credibility value used for + tie-break selection between different information-source + values. 
A higher value is preferable."; + } + leaf logical-network-element { + type string; + description + "When applicable, this is the name of a logical network + element from which the information is learned."; + } + leaf network-instance { + type string; + description + "When applicable, this is the name of a network instance + from which the information is learned."; + } + } + // information-source-state-attributes + + grouping information-source-per-link-attributes { + description + "Per-node container of the attributes identifying the source + that has provided the related information, and the source's + credibility."; + leaf information-source { + type te-info-source; + config false; + description + "Indicates the type of information source."; + } + leaf information-source-instance { + type string; + config false; + description + "The name indicating the instance of the information + source."; + } + container information-source-state { + config false; + description + "Contains state attributes related to the information + source."; + uses information-source-state-attributes; + container topology { + description + "When the information is processed by the system, + the attributes in this container indicate which topology + is used to generate the result information."; + uses nt:link-ref; + } + } + } + // information-source-per-link-attributes + + grouping information-source-per-node-attributes { + description + "Per-node container of the attributes identifying the source + that has provided the related information, and the source's + credibility."; + leaf information-source { + type te-info-source; + config false; + description + "Indicates the type of information source."; + } + leaf information-source-instance { + type string; + config false; + description + "The name indicating the instance of the information + source."; + } + container information-source-state { + config false; + description + "Contains state attributes related to the information + source."; + uses information-source-state-attributes; + container topology { + description + "When the information is processed by the system, + the attributes in this container indicate which topology + is used to generate the result information."; + uses nw:node-ref; + } + } + } + // information-source-per-node-attributes + + grouping interface-switching-capability-list { + description + "List of Interface Switching Capability Descriptors (ISCDs)."; + list interface-switching-capability { + key "switching-capability encoding"; + description + "List of ISCDs for this link."; + reference + "RFC 3471: Generalized Multi-Protocol Label Switching (GMPLS) + Signaling Functional Description + RFC 4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS)"; + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching capability for this interface."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by this interface."; + } + uses te-link-iscd-attributes; + } + // interface-switching-capability + } + // interface-switching-capability-list + + grouping statistics-per-link { + description + "Statistics attributes per TE link."; + leaf discontinuity-time { + type yang:date-and-time; + description + "The time of the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + /* Administrative attributes */ + leaf disables { + type yang:counter32; + description + "Number of times that a link was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a link was enabled."; + } + leaf maintenance-clears { + type yang:counter32; + description + "Number of times that a link was taken out of maintenance."; + } + leaf maintenance-sets { + type yang:counter32; + description + "Number of times that a link was put in maintenance."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a link was modified."; + } + /* Operational attributes */ + leaf downs { + type yang:counter32; + description + "Number of times that a link was set to an operational state + of 'down'."; + } + leaf ups { + type yang:counter32; + description + "Number of times that a link was set to an operational state + of 'up'."; + } + /* Recovery attributes */ + leaf fault-clears { + type yang:counter32; + description + "Number of times that a link experienced a fault-clear + event."; + } + leaf fault-detects { + type yang:counter32; + description + "Number of times that a link experienced fault detection."; + } + leaf protection-switches { + type yang:counter32; + description + "Number of times that a link experienced protection + switchover."; + } + leaf protection-reverts { + type yang:counter32; + description + "Number of times that a link experienced protection + reversion."; + } + leaf restoration-failures { + type yang:counter32; + description + "Number of times that a link experienced restoration + failure."; + } + leaf restoration-starts { + type yang:counter32; + description + "Number of times that a link experienced restoration + start."; + } + leaf restoration-successes { + type yang:counter32; + description + "Number of times that a link experienced restoration + success."; + } + leaf restoration-reversion-failures { + type yang:counter32; + description + "Number of times that a link experienced restoration + reversion failure."; + } + leaf restoration-reversion-starts { + type yang:counter32; + description + "Number of times that a link experienced restoration + reversion start."; + } + leaf restoration-reversion-successes { + type yang:counter32; + description + "Number of times that a link experienced restoration + reversion success."; + } + } + // statistics-per-link + + grouping statistics-per-node { + description + "Statistics attributes per TE node."; + leaf discontinuity-time { + type yang:date-and-time; + description + "The time of the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + container node { + description + "Contains statistics attributes at the TE node level."; + leaf disables { + type yang:counter32; + description + "Number of times that a node was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a node was enabled."; + } + leaf maintenance-sets { + type yang:counter32; + description + "Number of times that a node was put in maintenance."; + } + leaf maintenance-clears { + type yang:counter32; + description + "Number of times that a node was taken out of + maintenance."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a node was modified."; + } + } + // node + container connectivity-matrix-entry { + description + "Contains statistics attributes at the level of a + connectivity matrix entry."; + leaf creates { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + created."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'create' operation"; + } + leaf deletes { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + deleted."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'delete' operation"; + } + leaf disables { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + enabled."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a connectivity matrix entry was + modified."; + } + } + // connectivity-matrix-entry + } + // statistics-per-node + + grouping statistics-per-ttp { + description + "Statistics attributes per TE TTP (Tunnel Termination Point)."; + leaf discontinuity-time { + type yang:date-and-time; + description + "The time of the most recent occasion at which any one or + more of this interface's counters suffered a + discontinuity. 
If no such discontinuities have occurred + since the last re-initialization of the local management + subsystem, then this node contains the time the local + management subsystem re-initialized itself."; + } + container tunnel-termination-point { + description + "Contains statistics attributes at the TE TTP level."; + /* Administrative attributes */ + leaf disables { + type yang:counter32; + description + "Number of times that a TTP was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that a TTP was enabled."; + } + leaf maintenance-clears { + type yang:counter32; + description + "Number of times that a TTP was taken out of maintenance."; + } + leaf maintenance-sets { + type yang:counter32; + description + "Number of times that a TTP was put in maintenance."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that a TTP was modified."; + } + /* Operational attributes */ + leaf downs { + type yang:counter32; + description + "Number of times that a TTP was set to an operational state + of 'down'."; + } + leaf ups { + type yang:counter32; + description + "Number of times that a TTP was set to an operational state + of 'up'."; + } + leaf in-service-clears { + type yang:counter32; + description + "Number of times that a TTP was taken out of service + (TE tunnel was released)."; + } + leaf in-service-sets { + type yang:counter32; + description + "Number of times that a TTP was put in service by a TE + tunnel (TE tunnel was set up)."; + } + } + // tunnel-termination-point + container local-link-connectivity { + description + "Contains statistics attributes at the TE LLCL (Local Link + Connectivity List) level."; + leaf creates { + type yang:counter32; + description + "Number of times that an LLCL entry was created."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'create' operation"; + } + leaf deletes { + type yang:counter32; + description + "Number of times that an LLCL entry was deleted."; + reference + "RFC 6241: Network Configuration Protocol (NETCONF), + Section 7.2, 'delete' operation"; + } + leaf disables { + type yang:counter32; + description + "Number of times that an LLCL entry was disabled."; + } + leaf enables { + type yang:counter32; + description + "Number of times that an LLCL entry was enabled."; + } + leaf modifies { + type yang:counter32; + description + "Number of times that an LLCL entry was modified."; + } + } + // local-link-connectivity + } + // statistics-per-ttp + + grouping te-link-augment { + description + "Augmentation for a TE link."; + uses te-link-config; + uses te-link-state-derived; + container statistics { + config false; + description + "Statistics data."; + uses statistics-per-link; + } + } + // te-link-augment + + grouping te-link-config { + description + "TE link configuration grouping."; + choice bundle-stack-level { + description + "The TE link can be partitioned into bundled links or + component links."; + case bundle { + container bundled-links { + description + "A set of bundled links."; + reference + "RFC 4201: Link Bundling in MPLS Traffic + Engineering (TE)"; + list bundled-link { + key "sequence"; + description + "Specifies a bundled interface that is + further partitioned."; + leaf sequence { + type uint32; + description + "Identifies the sequence in the bundle."; + } + } + } + } + case component { + container component-links { + description + "A set of component links."; + list component-link { + key "sequence"; + description + "Specifies a 
component interface that is + sufficient to unambiguously identify the + appropriate resources."; + leaf sequence { + type uint32; + description + "Identifies the sequence in the bundle."; + } + leaf src-interface-ref { + type string; + description + "Reference to a component link interface on the + source node."; + } + leaf des-interface-ref { + type string; + description + "Reference to a component link interface on the + destination node."; + } + } + } + } + } + // bundle-stack-level + leaf-list te-link-template { + if-feature "template"; + type leafref { + path "../../../../te/templates/link-template/name"; + } + description + "The reference to a TE link template."; + } + uses te-link-config-attributes; + } + // te-link-config + + grouping te-link-config-attributes { + description + "Link configuration attributes in a TE topology."; + container te-link-attributes { + description + "Link attributes in a TE topology."; + leaf access-type { + type te-types:te-link-access-type; + description + "Link access type, which can be point-to-point or + multi-access."; + } + container external-domain { + description + "For an inter-domain link, specifies the attributes of + the remote end of the link, to facilitate the signaling at + the local end."; + uses nw:network-ref; + leaf remote-te-node-id { + type te-types:te-node-id; + description + "Remote TE node identifier, used together with + 'remote-te-link-tp-id' to identify the remote Link + Termination Point (LTP) in a different domain."; + } + leaf remote-te-link-tp-id { + type te-types:te-tp-id; + description + "Remote TE LTP identifier, used together with + 'remote-te-node-id' to identify the remote LTP in a + different domain."; + } + } + leaf is-abstract { + type empty; + description + "Present if the link is abstract."; + } + leaf name { + type string; + description + "Link name."; + } + container underlay { + if-feature "te-topology-hierarchy"; + description + "Attributes of the TE link underlay."; + reference + "RFC 4206: Label Switched Paths (LSP) Hierarchy with + Generalized Multi-Protocol Label Switching (GMPLS) + Traffic Engineering (TE)"; + uses te-link-underlay-attributes; + } + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the link."; + } + uses te-link-info-attributes; + } + // te-link-attributes + } + // te-link-config-attributes + + grouping te-link-info-attributes { + description + "Advertised TE information attributes."; + leaf link-index { + type uint64; + description + "The link identifier. If OSPF is used, this object + represents an ospfLsdbID. If IS-IS is used, this object + represents an isisLSPID. If a locally configured link is + used, this object represents a unique value, which is + locally defined in a router."; + } + leaf administrative-group { + type te-types:admin-groups; + description + "Administrative group or color of the link. 
+ This attribute covers both administrative groups (defined + in RFCs 3630 and 5305) and Extended Administrative Groups + (defined in RFC 7308)."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering + RFC 7308: Extended Administrative Groups in MPLS Traffic + Engineering (MPLS-TE)"; + } + uses interface-switching-capability-list; + uses te-types:label-set-info; + leaf link-protection-type { + type identityref { + base te-types:link-protection-type; + } + description + "Link Protection Type desired for this link."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching (GMPLS)"; + } + container max-link-bandwidth { + uses te-types:te-bandwidth; + description + "Maximum bandwidth that can be seen on this link in this + direction. Units are in bytes per second."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + container max-resv-link-bandwidth { + uses te-types:te-bandwidth; + description + "Maximum amount of bandwidth that can be reserved in this + direction in this link. Units are in bytes per second."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + list unreserved-bandwidth { + key "priority"; + max-elements 8; + description + "Unreserved bandwidth for priority levels 0-7. Units are in + bytes per second."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + leaf priority { + type uint8 { + range "0..7"; + } + description + "Priority."; + } + uses te-types:te-bandwidth; + } + leaf te-default-metric { + type uint32; + description + "Traffic Engineering metric."; + reference + "RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2 + RFC 5305: IS-IS Extensions for Traffic Engineering"; + } + leaf te-delay-metric { + type uint32; + description + "Traffic Engineering delay metric."; + reference + "RFC 7471: OSPF Traffic Engineering (TE) Metric Extensions"; + } + leaf te-igp-metric { + type uint32; + description + "IGP metric used for Traffic Engineering."; + reference + "RFC 3785: Use of Interior Gateway Protocol (IGP) Metric as a + second MPLS Traffic Engineering (TE) Metric"; + } + container te-srlgs { + description + "Contains a list of SRLGs."; + leaf-list value { + type te-types:srlg; + description + "SRLG value."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching (GMPLS)"; + } + } + container te-nsrlgs { + if-feature "nsrlg"; + description + "Contains a list of NSRLGs (Non-Shared Risk Link Groups). 
+ When an abstract TE link is configured, this list specifies + the request that underlay TE paths need to be mutually + disjoint with other TE links in the same groups."; + leaf-list id { + type uint32; + description + "NSRLG ID, uniquely configured within a topology."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + } + } + } + // te-link-info-attributes + + grouping te-link-iscd-attributes { + description + "TE link ISCD attributes."; + reference + "RFC 4203: OSPF Extensions in Support of Generalized + Multi-Protocol Label Switching (GMPLS), Section 1.4"; + list max-lsp-bandwidth { + key "priority"; + max-elements 8; + description + "Maximum Label Switched Path (LSP) bandwidth at + priorities 0-7."; + leaf priority { + type uint8 { + range "0..7"; + } + description + "Priority."; + } + uses te-types:te-bandwidth; + } + } + // te-link-iscd-attributes + + grouping te-link-state-derived { + description + "Link state attributes in a TE topology."; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the link."; + } + leaf is-transitional { + type empty; + config false; + description + "Present if the link is transitional; used as an + alternative approach in lieu of 'inter-layer-lock-id' + for path computation in a TE topology covering multiple + layers or multiple regions."; + reference + "RFC 5212: Requirements for GMPLS-Based Multi-Region and + Multi-Layer Networks (MRN/MLN) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + } + uses information-source-per-link-attributes; + list information-source-entry { + key "information-source information-source-instance"; + config false; + description + "A list of information sources learned, including the source + that is used."; + uses information-source-per-link-attributes; + uses te-link-info-attributes; + } + container recovery { + config false; + description + "Status of the recovery process."; + leaf restoration-status { + type te-types:te-recovery-status; + description + "Restoration status."; + } + leaf protection-status { + type te-types:te-recovery-status; + description + "Protection status."; + } + } + container underlay { + if-feature "te-topology-hierarchy"; + config false; + description + "State attributes for the TE link underlay."; + leaf dynamic { + type boolean; + description + "'true' if the underlay is dynamically created."; + } + leaf committed { + type boolean; + description + "'true' if the underlay is committed."; + } + } + } + // te-link-state-derived + + grouping te-link-underlay-attributes { + description + "Attributes for the TE link underlay."; + reference + "RFC 4206: Label Switched Paths (LSP) Hierarchy with + Generalized Multi-Protocol Label Switching (GMPLS) + Traffic Engineering (TE)"; + leaf enabled { + type boolean; + description + "'true' if the underlay is enabled. 
+ 'false' if the underlay is disabled."; + } + container primary-path { + description + "The service path on the underlay topology that + supports this link."; + uses nw:network-ref; + list path-element { + key "path-element-id"; + description + "A list of path elements describing the service path."; + leaf path-element-id { + type uint32; + description + "To identify the element in a path."; + } + uses te-path-element; + } + } + // primary-path + list backup-path { + key "index"; + description + "A list of backup service paths on the underlay topology that + protect the underlay primary path. If the primary path is + not protected, the list contains zero elements. If the + primary path is protected, the list contains one or more + elements."; + leaf index { + type uint32; + description + "A sequence number to identify a backup path."; + } + uses nw:network-ref; + list path-element { + key "path-element-id"; + description + "A list of path elements describing the backup service + path."; + leaf path-element-id { + type uint32; + description + "To identify the element in a path."; + } + uses te-path-element; + } + } + // backup-path + leaf protection-type { + type identityref { + base te-types:lsp-protection-type; + } + description + "Underlay protection type desired for this link."; + } + container tunnel-termination-points { + description + "Underlay TTPs desired for this link."; + leaf source { + type binary; + description + "Source TTP identifier."; + } + leaf destination { + type binary; + description + "Destination TTP identifier."; + } + } + container tunnels { + description + "Underlay TE tunnels supporting this TE link."; + leaf sharing { + type boolean; + default "true"; + description + "'true' if the underlay tunnel can be shared with other + TE links; + 'false' if the underlay tunnel is dedicated to this + TE link. 
+ This leaf is the default option for all TE tunnels + and may be overridden by the per-TE-tunnel value."; + } + list tunnel { + key "tunnel-name"; + description + "Zero, one, or more underlay TE tunnels that support this + TE link."; + leaf tunnel-name { + type string; + description + "A tunnel name uniquely identifies an underlay TE tunnel, + used together with the 'source-node' value for this + link."; + reference + "RFC 3209: RSVP-TE: Extensions to RSVP for LSP Tunnels"; + } + leaf sharing { + type boolean; + description + "'true' if the underlay tunnel can be shared with other + TE links; + 'false' if the underlay tunnel is dedicated to this + TE link."; + } + } + // tunnel + } + // tunnels + } + // te-link-underlay-attributes + + grouping te-node-augment { + description + "Augmentation for a TE node."; + uses te-node-config; + uses te-node-state-derived; + container statistics { + config false; + description + "Statistics data."; + uses statistics-per-node; + } + list tunnel-termination-point { + key "tunnel-tp-id"; + description + "A termination point can terminate a tunnel."; + leaf tunnel-tp-id { + type binary; + description + "TTP identifier."; + } + uses te-node-tunnel-termination-point-config; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the TTP."; + } + uses geolocation-container; + container statistics { + config false; + description + "Statistics data."; + uses statistics-per-ttp; + } + // Relationship to other TTPs + list supporting-tunnel-termination-point { + key "node-ref tunnel-tp-ref"; + description + "Identifies the TTPs on which this TTP depends."; + leaf node-ref { + type inet:uri; + description + "This leaf identifies the node in which the supporting + TTP is present. 
+ This node is either the supporting node or a node in + an underlay topology."; + } + leaf tunnel-tp-ref { + type binary; + description + "Reference to a TTP that is in either the supporting node + or a node in an underlay topology."; + } + } + // supporting-tunnel-termination-point + } + // tunnel-termination-point + } + // te-node-augment + + grouping te-node-config { + description + "TE node configuration grouping."; + leaf-list te-node-template { + if-feature "template"; + type leafref { + path "../../../../te/templates/node-template/name"; + } + description + "The reference to a TE node template."; + } + uses te-node-config-attributes; + } + // te-node-config + + grouping te-node-config-attributes { + description + "Configuration node attributes in a TE topology."; + container te-node-attributes { + description + "Contains node attributes in a TE topology."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the link."; + } + uses te-node-connectivity-matrices; + uses te-node-info-attributes; + } + } + // te-node-config-attributes + + grouping te-node-config-attributes-template { + description + "Configuration node attributes for a template in a TE + topology."; + container te-node-attributes { + description + "Contains node attributes in a TE topology."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the link."; + } + uses te-node-info-attributes; + } + } + // te-node-config-attributes-template + + grouping te-node-connectivity-matrices { + description + "Connectivity matrix on a TE node."; + container connectivity-matrices { + description + "Contains a connectivity matrix on a TE node."; + leaf number-of-entries { + type uint16; + description + "The number of connectivity matrix entries. 
+ If this number is specified in the configuration request, + the number is the requested number of entries, which may + not all be listed in the list; + if this number is reported in the state data, + the number is the current number of operational entries."; + } + uses te-types:label-set-info; + uses connectivity-matrix-entry-path-attributes; + list connectivity-matrix { + key "id"; + description + "Represents a node's switching limitations, i.e., + limitations in the interconnecting network TE links + across the node."; + reference + "RFC 7579: General Network Element Constraint Encoding + for GMPLS-Controlled Networks"; + leaf id { + type uint32; + description + "Identifies the connectivity matrix entry."; + } + } + // connectivity-matrix + } + // connectivity-matrices + } + // te-node-connectivity-matrices + + grouping te-node-connectivity-matrix-attributes { + description + "Termination point references of a connectivity matrix entry."; + container from { + description + "Reference to a source LTP."; + leaf tp-ref { + type leafref { + path "../../../../../../nt:termination-point/nt:tp-id"; + } + description + "Relative reference to a termination point."; + } + uses te-types:label-set-info; + } + container to { + description + "Reference to a destination LTP."; + leaf tp-ref { + type leafref { + path "../../../../../../nt:termination-point/nt:tp-id"; + } + description + "Relative reference to a termination point."; + } + uses te-types:label-set-info; + } + uses connectivity-matrix-entry-path-attributes; + } + // te-node-connectivity-matrix-attributes + + grouping te-node-info-attributes { + description + "Advertised TE information attributes."; + leaf domain-id { + type uint32; + description + "Identifies the domain to which this node belongs. 
+ This attribute is used to support inter-domain links."; + reference + "RFC 5152: A Per-Domain Path Computation Method for + Establishing Inter-Domain Traffic Engineering (TE) + Label Switched Paths (LSPs) + RFC 5316: ISIS Extensions in Support of Inter-Autonomous + System (AS) MPLS and GMPLS Traffic Engineering + RFC 5392: OSPF Extensions in Support of Inter-Autonomous + System (AS) MPLS and GMPLS Traffic Engineering"; + } + leaf is-abstract { + type empty; + description + "Present if the node is abstract; not present if the node + is actual."; + } + leaf name { + type string; + description + "Node name."; + } + leaf-list signaling-address { + type inet:ip-address; + description + "The node's signaling address."; + } + container underlay-topology { + if-feature "te-topology-hierarchy"; + description + "When an abstract node encapsulates a topology, the + attributes in this container point to said topology."; + uses nw:network-ref; + } + } + // te-node-info-attributes + + grouping te-node-state-derived { + description + "Node state attributes in a TE topology."; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the node."; + } + uses geolocation-container; + leaf is-multi-access-dr { + type empty; + config false; + description + "The presence of this attribute indicates that this TE node + is a pseudonode elected as a designated router."; + reference + "RFC 1195: Use of OSI IS-IS for Routing in TCP/IP and Dual + Environments + RFC 3630: Traffic Engineering (TE) Extensions to OSPF + Version 2"; + } + uses information-source-per-node-attributes; + list information-source-entry { + key "information-source information-source-instance"; + config false; + description + "A list of information sources learned, including the source + that is used."; + uses information-source-per-node-attributes; + uses te-node-connectivity-matrices; + uses te-node-info-attributes; + } + } + // te-node-state-derived + + grouping te-node-tunnel-termination-point-config { + description + "Termination capability of a TTP on a TE node."; + uses te-node-tunnel-termination-point-config-attributes; + container local-link-connectivities { + description + "Contains an LLCL for a TTP on a TE node."; + leaf number-of-entries { + type uint16; + description + "The number of LLCL entries. 
+ If this number is specified in the configuration request, + the number is the requested number of entries, which may + not all be listed in the list; + if this number is reported in the state data, + the number is the current number of operational entries."; + } + uses te-types:label-set-info; + uses connectivity-matrix-entry-path-attributes; + } + } + // te-node-tunnel-termination-point-config + + grouping te-node-tunnel-termination-point-config-attributes { + description + "Configuration attributes of a TTP on a TE node."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the TTP."; + } + leaf name { + type string; + description + "A descriptive name for the TTP."; + } + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching capability for this interface."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by this interface."; + } + leaf-list inter-layer-lock-id { + type uint32; + description + "Inter-layer lock ID, used for path computation in a TE + topology covering multiple layers or multiple regions."; + reference + "RFC 5212: Requirements for GMPLS-Based Multi-Region and + Multi-Layer Networks (MRN/MLN) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + } + leaf protection-type { + type identityref { + base te-types:lsp-protection-type; + } + description + "The protection type that this TTP is capable of."; + } + container client-layer-adaptation { + description + "Contains capability information to support a client-layer + adaptation in a multi-layer topology."; + list switching-capability { + key "switching-capability encoding"; + description + "List of supported switching capabilities."; + reference + "RFC 4202: Routing Extensions in Support of + Generalized Multi-Protocol Label Switching (GMPLS) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + leaf switching-capability { + type identityref { + base te-types:switching-capabilities; + } + description + "Switching capability for the client-layer adaptation."; + } + leaf encoding { + type identityref { + base te-types:lsp-encoding-types; + } + description + "Encoding supported by the client-layer adaptation."; + } + uses te-types:te-bandwidth; + } + } + } + // te-node-tunnel-termination-point-config-attributes + + grouping te-node-tunnel-termination-point-llc-list { + description + "LLCL of a TTP on a TE node."; + list local-link-connectivity { + key "link-tp-ref"; + description + "The termination capabilities between the TTP and the LTP. + This capability information can be used to compute + the tunnel path. 
+ The Interface Adjustment Capability Descriptors (IACDs) + (defined in RFC 6001) on each LTP can be derived from + this list."; + reference + "RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + leaf link-tp-ref { + type leafref { + path "../../../../../nt:termination-point/nt:tp-id"; + } + description + "LTP."; + } + uses te-types:label-set-info; + uses connectivity-matrix-entry-path-attributes; + } + } + // te-node-tunnel-termination-point-llc-list + + grouping te-path-element { + description + "A group of attributes defining an element in a TE path, + such as a TE node, TE link, TE atomic resource, or label."; + uses te-types:explicit-route-hop; + } + // te-path-element + + grouping te-termination-point-augment { + description + "Augmentation for a TE termination point."; + leaf te-tp-id { + type te-types:te-tp-id; + description + "An identifier that uniquely identifies a TE termination + point."; + } + container te { + must '../te-tp-id'; + presence "TE support"; + description + "Indicates TE support."; + uses te-termination-point-config; + leaf oper-status { + type te-types:te-oper-status; + config false; + description + "The current operational state of the LTP."; + } + uses geolocation-container; + } + } + // te-termination-point-augment + + grouping te-termination-point-config { + description + "TE termination point configuration grouping."; + leaf admin-status { + type te-types:te-admin-status; + description + "The administrative state of the LTP."; + } + leaf name { + type string; + description + "A descriptive name for the LTP."; + } + uses interface-switching-capability-list; + leaf inter-domain-plug-id { + type binary; + description + "A network-wide unique number that identifies on the + network a connection that supports a given inter-domain + TE link. 
This is a more flexible alternative to specifying + 'remote-te-node-id' and 'remote-te-link-tp-id' on a TE link + when the provider either does not know 'remote-te-node-id' + and 'remote-te-link-tp-id' or needs to give the client the + flexibility to mix and match multiple topologies."; + } + leaf-list inter-layer-lock-id { + type uint32; + description + "Inter-layer lock ID, used for path computation in a TE + topology covering multiple layers or multiple regions."; + reference + "RFC 5212: Requirements for GMPLS-Based Multi-Region and + Multi-Layer Networks (MRN/MLN) + RFC 6001: Generalized MPLS (GMPLS) Protocol Extensions + for Multi-Layer and Multi-Region Networks (MLN/MRN)"; + } + } + // te-termination-point-config + + grouping te-topologies-augment { + description + "Augmentation for TE topologies."; + container te { + presence "TE support"; + description + "Indicates TE support."; + container templates { + description + "Configuration parameters for templates used for a TE + topology."; + list node-template { + if-feature "template"; + key "name"; + leaf name { + type te-types:te-template-name; + description + "The name to identify a TE node template."; + } + description + "The list of TE node templates used to define sharable + and reusable TE node attributes."; + uses template-attributes; + uses te-node-config-attributes-template; + } + // node-template + list link-template { + if-feature "template"; + key "name"; + leaf name { + type te-types:te-template-name; + description + "The name to identify a TE link template."; + } + description + "The list of TE link templates used to define sharable + and reusable TE link attributes."; + uses template-attributes; + uses te-link-config-attributes; + } + // link-template + } + // templates + } + // te + } + // te-topologies-augment + + grouping te-topology-augment { + description + "Augmentation for a TE topology."; + uses te-types:te-topology-identifier; + container te { + must '../te-topology-identifier/provider-id' + + ' and ../te-topology-identifier/client-id' + + ' and ../te-topology-identifier/topology-id'; + presence "TE support"; + description + "Indicates TE support."; + uses te-topology-config; + uses geolocation-container; + } + } + // te-topology-augment + + grouping te-topology-config { + description + "TE topology configuration grouping."; + leaf name { + type string; + description + "Name of the TE topology. This attribute is optional and can + be specified by the operator to describe the TE topology, + which can be useful when 'network-id' (RFC 8345) is not + descriptive and not modifiable because of being generated + by the system."; + reference + "RFC 8345: A YANG Data Model for Network Topologies"; + } + leaf preference { + type uint8 { + range "1..255"; + } + description + "Specifies a preference for this topology. 
A lower number + indicates a higher preference."; + } + leaf optimization-criterion { + type identityref { + base te-types:objective-function-type; + } + description + "Optimization criterion applied to this topology."; + reference + "RFC 3272: Overview and Principles of Internet Traffic + Engineering"; + } + list nsrlg { + if-feature "nsrlg"; + key "id"; + description + "List of NSRLGs (Non-Shared Risk Link Groups)."; + reference + "RFC 4872: RSVP-TE Extensions in Support of End-to-End + Generalized Multi-Protocol Label Switching (GMPLS) + Recovery"; + leaf id { + type uint32; + description + "Identifies the NSRLG entry."; + } + leaf disjointness { + type te-types:te-path-disjointness; + description + "The type of resource disjointness."; + } + } + // nsrlg + } + // te-topology-config + + grouping template-attributes { + description + "Common attributes for all templates."; + leaf priority { + type uint16; + description + "The preference value for resolving conflicts between + different templates. When two or more templates specify + values for one configuration attribute, the value from the + template with the highest priority is used. + A lower number indicates a higher priority. The highest + priority is 0."; + } + leaf reference-change-policy { + type enumeration { + enum no-action { + description + "When an attribute changes in this template, the + configuration node referring to this template does + not take any action."; + } + enum not-allowed { + description + "When any configuration object has a reference to this + template, changing this template is not allowed."; + } + enum cascade { + description + "When an attribute changes in this template, the + configuration object referring to this template applies + the new attribute value to the corresponding + configuration."; + } + } + description + "This attribute specifies the action taken for a + configuration node that has a reference to this template."; + } + } + // template-attributes + + /* + * Data nodes + */ + + augment "/nw:networks/nw:network/nw:network-types" { + description + "Introduces a new network type for a TE topology."; + container te-topology { + presence "Indicates a TE topology"; + description + "Its presence identifies the TE topology type."; + } + } + + augment "/nw:networks" { + description + "Augmentation parameters for TE topologies."; + uses te-topologies-augment; + } + + augment "/nw:networks/nw:network" { + when 'nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for a TE topology."; + uses te-topology-augment; + } + + augment "/nw:networks/nw:network/nw:node" { + when '../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for TE at the node level."; + leaf te-node-id { + type te-types:te-node-id; + description + "The identifier of a node in the TE topology. + A node is specific to a topology to which it belongs."; + } + container te { + must '../te-node-id' { + description + "'te-node-id' is mandatory."; + } + must 'count(../nw:supporting-node)<=1' { + description + "For a node in a TE topology, there cannot be more + than one supporting node. 
If multiple nodes are + abstracted, the 'underlay-topology' field is used."; + } + presence "TE support"; + description + "Indicates TE support."; + uses te-node-augment; + } + } + + augment "/nw:networks/nw:network/nt:link" { + when '../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for TE at the link level."; + container te { + must 'count(../nt:supporting-link)<=1' { + description + "For a link in a TE topology, there cannot be more + than one supporting link. If one or more link paths are + abstracted, the underlay is used."; + } + presence "TE support"; + description + "Indicates TE support."; + uses te-link-augment; + } + } + + augment "/nw:networks/nw:network/nw:node/" + + "nt:termination-point" { + when '../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Configuration parameters for TE at the termination point + level."; + uses te-termination-point-augment; + } + + augment "/nw:networks/nw:network/nt:link/te/bundle-stack-level/" + + "bundle/bundled-links/bundled-link" { + when '../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for a TE bundled link."; + leaf src-tp-ref { + type leafref { + path "../../../../../nw:node[nw:node-id = " + + "current()/../../../../nt:source/" + + "nt:source-node]/" + + "nt:termination-point/nt:tp-id"; + require-instance true; + } + description + "Reference to another TE termination point on the + same source node."; + } + leaf des-tp-ref { + type leafref { + path "../../../../../nw:node[nw:node-id = " + + "current()/../../../../nt:destination/" + + "nt:dest-node]/" + + "nt:termination-point/nt:tp-id"; + require-instance true; + } + description + "Reference to another TE termination point on the + same destination node."; + } + } + + augment "/nw:networks/nw:network/nw:node/te/" + + "information-source-entry/connectivity-matrices/" + + "connectivity-matrix" { + when '../../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for the TE node connectivity matrix."; + uses te-node-connectivity-matrix-attributes; + } + + augment "/nw:networks/nw:network/nw:node/te/te-node-attributes/" + + "connectivity-matrices/connectivity-matrix" { + when '../../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for the TE node connectivity matrix."; + uses te-node-connectivity-matrix-attributes; + } + + augment "/nw:networks/nw:network/nw:node/te/" + + "tunnel-termination-point/local-link-connectivities" { + when '../../../../nw:network-types/tet:te-topology' { + description + "Augmentation parameters apply only for networks with a + TE topology type."; + } + description + "Augmentation for TE node TTP LLCs (Local Link + Connectivities)."; + uses te-node-tunnel-termination-point-llc-list; + } +} \ No newline at end of file diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/yang-repo-url.txt b/src/tests/tools/mock_nce_fan_ctrl/yang/yang-repo-url.txt new file mode 100644 index 000000000..df60dab3b --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/yang-repo-url.txt @@ -0,0 
+1 @@ +https://github.com/YangModels/yang -- GitLab From db06508c177d2b412a15234db16b4ab574f4ab15 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 14:52:26 +0000 Subject: [PATCH 083/367] SIMAP Connector - SimapUpdater: - Correct code for dispatching devices inherited from underlying controllers --- .../service/simap_updater/SimapUpdater.py | 89 +++++++++---------- 1 file changed, 43 insertions(+), 46 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 56413cdc1..e130267dd 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -35,6 +35,14 @@ LOGGER = logging.getLogger(__name__) RESTCONF_LOGGER = logging.getLogger(__name__ + '.RestConfClient') +SKIPPED_DEVICE_TYPES = { + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, + DeviceTypeEnum.IP_SDN_CONTROLLER.value, + DeviceTypeEnum.NCE.value, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, +} + + class EventDispatcher(BaseEventDispatcher): def __init__( self, events_queue : queue.PriorityQueue, @@ -127,12 +135,6 @@ class EventDispatcher(BaseEventDispatcher): device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) device_type = device.device_type - SKIPPED_DEVICE_TYPES = { - DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, - DeviceTypeEnum.IP_SDN_CONTROLLER.value, - DeviceTypeEnum.NCE.value, - DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, - } if device_type in SKIPPED_DEVICE_TYPES: self._add_skipped_device(device) MSG = ( @@ -144,21 +146,21 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning(MSG.format(str_device_event, str_device)) return - device_controller_uuid = device.controller_id.device_uuid.uuid - if len(device_controller_uuid) > 0: - self._add_skipped_device(device) - MSG = ( - 'DeviceEvent({:s}) skipped, is a remotely-managed device. ' - 'SIMAP should be populated by remote controller: {:s}' - ) - str_device_event = grpc_message_to_json_string(device_event) - str_device = grpc_message_to_json_string(device) - LOGGER.warning(MSG.format(str_device_event, str_device)) - return + #device_controller_uuid = device.controller_id.device_uuid.uuid + #if len(device_controller_uuid) > 0: + # self._add_skipped_device(device) + # MSG = ( + # 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' + # 'SIMAP should be populated by remote controller: {:s}' + # ) + # str_device_event = grpc_message_to_json_string(device_event) + # str_device = grpc_message_to_json_string(device) + # LOGGER.warning(MSG.format(str_device_event, str_device)) + # return topology_uuid, endpoint_names = get_device_endpoint(device) if topology_uuid is None: - self._add_skipped_device(device) + #self._add_skipped_device(device) MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' str_device_event = grpc_message_to_json_string(device_event) str_device = grpc_message_to_json_string(device) @@ -172,8 +174,13 @@ class EventDispatcher(BaseEventDispatcher): te_topo.update() device_name = device.name - te_topo.node(device_name).create(termination_point_ids=endpoint_names) - self._remove_skipped_device(device) + te_device = te_topo.node(device_name) + te_device.update() + + for endpoint_name in endpoint_names: + te_device.termination_point(endpoint_name).update() + + #self._remove_skipped_device(device) MSG = 'Device Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -187,12 +194,6 @@ class EventDispatcher(BaseEventDispatcher): device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) device_type = device.device_type - SKIPPED_DEVICE_TYPES = { - DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, - DeviceTypeEnum.IP_SDN_CONTROLLER.value, - DeviceTypeEnum.NCE.value, - DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, - } if device_type in SKIPPED_DEVICE_TYPES: self._add_skipped_device(device) MSG = ( @@ -204,21 +205,21 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning(MSG.format(str_device_event, str_device)) return - device_controller_uuid = device.controller_id.device_uuid.uuid - if len(device_controller_uuid) > 0: - self._add_skipped_device(device) - MSG = ( - 'DeviceEvent({:s}) skipped, is a remotely-managed device. ' - 'SIMAP should be updated by remote controller: {:s}' - ) - str_device_event = grpc_message_to_json_string(device_event) - str_device = grpc_message_to_json_string(device) - LOGGER.warning(MSG.format(str_device_event, str_device)) - return + #device_controller_uuid = device.controller_id.device_uuid.uuid + #if len(device_controller_uuid) > 0: + # self._add_skipped_device(device) + # MSG = ( + # 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' + # 'SIMAP should be updated by remote controller: {:s}' + # ) + # str_device_event = grpc_message_to_json_string(device_event) + # str_device = grpc_message_to_json_string(device) + # LOGGER.warning(MSG.format(str_device_event, str_device)) + # return topology_uuid, endpoint_names = get_device_endpoint(device) if topology_uuid is None: - self._add_skipped_device(device) + #self._add_skipped_device(device) MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' str_device_event = grpc_message_to_json_string(device_event) str_device = grpc_message_to_json_string(device) @@ -238,7 +239,7 @@ class EventDispatcher(BaseEventDispatcher): for endpoint_name in endpoint_names: te_device.termination_point(endpoint_name).update() - self._remove_skipped_device(device) + #self._remove_skipped_device(device) MSG = 'Device Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -252,12 +253,6 @@ class EventDispatcher(BaseEventDispatcher): device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) device_type = device.device_type - SKIPPED_DEVICE_TYPES = { - DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, - DeviceTypeEnum.IP_SDN_CONTROLLER.value, - DeviceTypeEnum.NCE.value, - DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, - } if device_type in SKIPPED_DEVICE_TYPES: self._add_skipped_device(device) MSG = ( @@ -271,7 +266,9 @@ class EventDispatcher(BaseEventDispatcher): device_controller_uuid = device.controller_id.device_uuid.uuid if len(device_controller_uuid) > 0: - self._add_skipped_device(device) + # if it is a delete of a remotely-managed device, + # should be managed by owner controller + #self._add_skipped_device(device) MSG = ( 'DeviceEvent({:s}) skipped, is a remotely-managed device. ' 'SIMAP should be updated by remote controller: {:s}' -- GitLab From c44be7fb37799c8ceaeebba187746ae00494c3a7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 15:31:06 +0000 Subject: [PATCH 084/367] Tests - Tools - Mock NCE-FAN Controller - Configured private YANG models - Made SIMAP optional --- .../mock_nce_fan_ctrl/nce_fan_ctrl/app.py | 78 ++++++++++--------- .../tools/mock_nce_fan_ctrl/yang/.gitignore | 17 ++++ 2 files changed, 58 insertions(+), 37 deletions(-) create mode 100644 src/tests/tools/mock_nce_fan_ctrl/yang/.gitignore diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py index 7a87f732a..654971fd2 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py @@ -13,7 +13,7 @@ # limitations under the License. 
-import json, logging, secrets +import json, logging, os, secrets from flask import Flask from flask_restful import Api from .Dispatch import RestConfDispatch @@ -46,43 +46,47 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: YANG_STARTUP_DATA = json.loads(fp.read()) -restconf_client = RestConfClient( - '172.17.0.1', port=8080, - logger=logging.getLogger('RestConfClient') -) -simap_client = SimapClient(restconf_client) - -te_topo = simap_client.network('admin') -te_topo.update() - -networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) -networks = networks.get('network', list()) -assert len(networks) == 1 -network = networks[0] -assert network['network-id'] == 'admin' - -nodes = network.get('node', list()) -for node in nodes: - node_id = node['node-id'] - tp_ids = [ - tp['tp-id'] - for tp in node['ietf-network-topology:termination-point'] - ] - te_topo.node(node_id).create(termination_point_ids=tp_ids) - -links = network.get('ietf-network-topology:link', list()) -for link in links: - link_id = link['link-id'] - link_src = link['source'] - link_dst = link['destination'] - link_src_node_id = link_src['source-node'] - link_src_tp_id = link_src['source-tp'] - link_dst_node_id = link_dst['dest-node'] - link_dst_tp_id = link_dst['dest-tp'] - - te_topo.link(link_id).create( - link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id +SIMAP_ADDRESS = os.environ.get('SIMAP_ADDRESS') +SIMAP_PORT = os.environ.get('SIMAP_PORT' ) + +if SIMAP_ADDRESS is not None and SIMAP_PORT is not None: + restconf_client = RestConfClient( + SIMAP_ADDRESS, port=SIMAP_PORT, + logger=logging.getLogger('RestConfClient') ) + simap_client = SimapClient(restconf_client) + + te_topo = simap_client.network('admin') + te_topo.update() + + networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) + networks = networks.get('network', list()) + assert len(networks) == 1 + network = networks[0] + assert network['network-id'] == 'admin' + + nodes = network.get('node', list()) + for node in nodes: + node_id = node['node-id'] + tp_ids = [ + tp['tp-id'] + for tp in node['ietf-network-topology:termination-point'] + ] + te_topo.node(node_id).create(termination_point_ids=tp_ids) + + links = network.get('ietf-network-topology:link', list()) + for link in links: + link_id = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_node_id = link_src['source-node'] + link_src_tp_id = link_src['source-tp'] + link_dst_node_id = link_dst['dest-node'] + link_dst_tp_id = link_dst['dest-tp'] + + te_topo.link(link_id).create( + link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id + ) yang_handler = YangHandler( diff --git a/src/tests/tools/mock_nce_fan_ctrl/yang/.gitignore b/src/tests/tools/mock_nce_fan_ctrl/yang/.gitignore new file mode 100644 index 000000000..6c90250b2 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/yang/.gitignore @@ -0,0 +1,17 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +# Add here folders containing non-public data models +private-*/ -- GitLab From 9d7eb54dac0243431bbd2cf9204e25c880f90ae6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 15:32:10 +0000 Subject: [PATCH 085/367] Tests - Tools - Mock NCE-T Controller - Made SIMAP optional --- .../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 78 ++++++++++--------- 1 file changed, 41 insertions(+), 37 deletions(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 7a87f732a..654971fd2 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -13,7 +13,7 @@ # limitations under the License. -import json, logging, secrets +import json, logging, os, secrets from flask import Flask from flask_restful import Api from .Dispatch import RestConfDispatch @@ -46,43 +46,47 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: YANG_STARTUP_DATA = json.loads(fp.read()) -restconf_client = RestConfClient( - '172.17.0.1', port=8080, - logger=logging.getLogger('RestConfClient') -) -simap_client = SimapClient(restconf_client) - -te_topo = simap_client.network('admin') -te_topo.update() - -networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) -networks = networks.get('network', list()) -assert len(networks) == 1 -network = networks[0] -assert network['network-id'] == 'admin' - -nodes = network.get('node', list()) -for node in nodes: - node_id = node['node-id'] - tp_ids = [ - tp['tp-id'] - for tp in node['ietf-network-topology:termination-point'] - ] - te_topo.node(node_id).create(termination_point_ids=tp_ids) - -links = network.get('ietf-network-topology:link', list()) -for link in links: - link_id = link['link-id'] - link_src = link['source'] - link_dst = link['destination'] - link_src_node_id = link_src['source-node'] - link_src_tp_id = link_src['source-tp'] - link_dst_node_id = link_dst['dest-node'] - link_dst_tp_id = link_dst['dest-tp'] - - te_topo.link(link_id).create( - link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id +SIMAP_ADDRESS = os.environ.get('SIMAP_ADDRESS') +SIMAP_PORT = os.environ.get('SIMAP_PORT' ) + +if SIMAP_ADDRESS is not None and SIMAP_PORT is not None: + restconf_client = RestConfClient( + SIMAP_ADDRESS, port=SIMAP_PORT, + logger=logging.getLogger('RestConfClient') ) + simap_client = SimapClient(restconf_client) + + te_topo = simap_client.network('admin') + te_topo.update() + + networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) + networks = networks.get('network', list()) + assert len(networks) == 1 + network = networks[0] + assert network['network-id'] == 'admin' + + nodes = network.get('node', list()) + for node in nodes: + node_id = node['node-id'] + tp_ids = [ + tp['tp-id'] + for tp in node['ietf-network-topology:termination-point'] + ] + te_topo.node(node_id).create(termination_point_ids=tp_ids) + + links = network.get('ietf-network-topology:link', list()) + for link in links: + link_id = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_node_id = link_src['source-node'] + link_src_tp_id = link_src['source-tp'] + link_dst_node_id = link_dst['dest-node'] + link_dst_tp_id = link_dst['dest-tp'] + + te_topo.link(link_id).create( + link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id + ) yang_handler = YangHandler( -- GitLab From 
0338c975780c657c88a3b3fe40d20b124f10b8b5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 15:33:43 +0000 Subject: [PATCH 086/367] ECOC F5GA Telemetry Demo: - Updated redeploy script to specify SIMAP server address and port --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 2a0dd0736..1b4db5507 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -42,8 +42,8 @@ case "$HOSTNAME" in echo "Deploying support services..." docker run --detach --name simap-server --publish 8080:8080 simap-server:mock - docker run --detach --name nce-fan-ctrl --publish 8081:8080 nce-fan-ctrl:mock - docker run --detach --name nce-t-ctrl --publish 8082:8080 nce-t-ctrl:mock + docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 SIMAP_PORT=8080 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 SIMAP_PORT=8080 nce-t-ctrl:mock sleep 2 docker ps -a -- GitLab From c227f437e6b893c3a2b0a22200ed7f74b5dc7f74 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 15:38:54 +0000 Subject: [PATCH 087/367] ECOC F5GA Telemetry Demo: - Fixed redeploy script --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 1b4db5507..60e351abc 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -42,8 +42,8 @@ case "$HOSTNAME" in echo "Deploying support services..." docker run --detach --name simap-server --publish 8080:8080 simap-server:mock - docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 SIMAP_PORT=8080 nce-fan-ctrl:mock - docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 SIMAP_PORT=8080 nce-t-ctrl:mock + docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock sleep 2 docker ps -a -- GitLab From c4d36d2d542c03630ff4b7f752a2103cd099f368 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 15:42:25 +0000 Subject: [PATCH 088/367] ECOC F5GA Telemetry Demo: - Fixed redeploy script --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 60e351abc..fd798ed4c 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -28,7 +28,7 @@ case "$HOSTNAME" in docker buildx build -t simap-server:mock -f Dockerfile . echo "Building NCE-FAN Controller..." - cd ~/tfs-ctrl/src/tests/tools/mock_nce_ctrl + cd ~/tfs-ctrl/src/tests/tools/mock_nce_fan_ctrl docker buildx build -t nce-fan-ctrl:mock -f Dockerfile . echo "Building NCE-T Controller..." 
-- GitLab From 6a3609d9cf1fe02c262efd46600bfd00badf3fc8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 15:59:40 +0000 Subject: [PATCH 089/367] Device component - ACTN Driver: - Code polishing --- .../drivers/ietf_actn/IetfActnDriver.py | 14 +-- .../ietf_actn/handlers/RestApiClient.py | 104 ------------------ 2 files changed, 5 insertions(+), 113 deletions(-) delete mode 100644 src/device/service/drivers/ietf_actn/handlers/RestApiClient.py diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index b15cb3f84..12064c3e8 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -85,14 +85,10 @@ class IetfActnDriver(_Driver): with self.__lock: if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS for i, resource_key in enumerate(resource_keys): - chk_string('resource_key[#{:d}]'.format(i), resource_key, allow_empty=False) - + str_resource_name = 'resource_key[#{:d}]'.format(i) try: + chk_string(str_resource_name, resource_key, allow_empty=False) if resource_key == RESOURCE_ENDPOINTS: - # Add mgmt endpoint by default - #resource_key = '/endpoints/endpoint[mgmt]' - #resource_value = {'uuid': 'mgmt', 'name': 'mgmt', 'type': 'mgmt'} - #results.append((resource_key, resource_value)) results.extend(self._handler_net_topology.get()) elif resource_key == RESOURCE_SERVICES: get_osu_tunnels(self._handler_osu_tunnel, results) @@ -105,9 +101,9 @@ class IetfActnDriver(_Driver): if etht_service_name is not None: get_etht_services(self._handler_etht_service, results, etht_service_name=etht_service_name) except Exception as e: - MSG = 'Error processing resource_key: {:s}' - LOGGER.exception(MSG.format(str(resource_key))) - results.append((resource_key, e)) + MSG = 'Error processing resource_key({:s}, {:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) # if processing fails, store the exception return results diff --git a/src/device/service/drivers/ietf_actn/handlers/RestApiClient.py b/src/device/service/drivers/ietf_actn/handlers/RestApiClient.py deleted file mode 100644 index 2e369ab59..000000000 --- a/src/device/service/drivers/ietf_actn/handlers/RestApiClient.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import copy, json, logging, requests -from requests.auth import HTTPBasicAuth -from typing import Any, Dict, List, Set, Union - -LOGGER = logging.getLogger(__name__) - -DEFAULT_BASE_URL = '/restconf/v2/data' -DEFAULT_SCHEME = 'https' -DEFAULT_TIMEOUT = 120 -DEFAULT_VERIFY = False - -HTTP_STATUS_OK = 200 -HTTP_STATUS_CREATED = 201 -HTTP_STATUS_ACCEPTED = 202 -HTTP_STATUS_NO_CONTENT = 204 - -HTTP_OK_CODES = { - HTTP_STATUS_OK, - HTTP_STATUS_CREATED, - HTTP_STATUS_ACCEPTED, - HTTP_STATUS_NO_CONTENT, -} - -class RestApiClient: - def __init__(self, address : str, port : int, settings : Dict[str, Any] = dict()) -> None: - username = settings.get('username') - password = settings.get('password') - self._auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None - - scheme = settings.get('scheme', DEFAULT_SCHEME ) - base_url = settings.get('base_url', DEFAULT_BASE_URL) - self._base_url = '{:s}://{:s}:{:d}{:s}'.format(scheme, address, int(port), base_url) - - self._timeout = int(settings.get('timeout', DEFAULT_TIMEOUT)) - self._verify = bool(settings.get('verify', DEFAULT_VERIFY)) - - def get( - self, object_name : str, url : str, - expected_http_status : Set[int] = {HTTP_STATUS_OK} - ) -> Union[Dict, List]: - MSG = 'Get {:s}({:s})' - LOGGER.info(MSG.format(str(object_name), str(url))) - response = requests.get( - self._base_url + url, - timeout=self._timeout, verify=self._verify, auth=self._auth - ) - LOGGER.info(' Response[{:s}]: {:s}'.format(str(response.status_code), str(response.content))) - - if response.status_code in expected_http_status: return json.loads(response.content) - - MSG = 'Could not get {:s}({:s}): status_code={:s} reply={:s}' - raise Exception(MSG.format(str(object_name), str(url), str(response.status_code), str(response))) - - def update( - self, object_name : str, url : str, data : Dict, headers : Dict[str, Any] = dict(), - expected_http_status : Set[int] = HTTP_OK_CODES - ) -> None: - headers = copy.deepcopy(headers) - if 'content-type' not in {header_name.lower() for header_name in headers.keys()}: - headers.update({'content-type': 'application/json'}) - - MSG = 'Create/Update {:s}({:s}, {:s})' - LOGGER.info(MSG.format(str(object_name), str(url), str(data))) - response = requests.post( - self._base_url + url, data=json.dumps(data), headers=headers, - timeout=self._timeout, verify=self._verify, auth=self._auth - ) - LOGGER.info(' Response[{:s}]: {:s}'.format(str(response.status_code), str(response.content))) - - if response.status_code in expected_http_status: return - - MSG = 'Could not create/update {:s}({:s}, {:s}): status_code={:s} reply={:s}' - raise Exception(MSG.format(str(object_name), str(url), str(data), str(response.status_code), str(response))) - - def delete( - self, object_name : str, url : str, - expected_http_status : Set[int] = HTTP_OK_CODES - ) -> None: - MSG = 'Delete {:s}({:s})' - LOGGER.info(MSG.format(str(object_name), str(url))) - response = requests.delete( - self._base_url + url, - timeout=self._timeout, verify=self._verify, auth=self._auth - ) - LOGGER.info(' Response[{:s}]: {:s}'.format(str(response.status_code), str(response.content))) - - if response.status_code in expected_http_status: return - - MSG = 'Could not delete {:s}({:s}): status_code={:s} reply={:s}' - raise Exception(MSG.format(str(object_name), str(url), str(response.status_code), str(response))) -- GitLab From df7d5bfd670c6e478538c67d49194a75b743be93 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 16:00:31 +0000 Subject: 
[PATCH 090/367] Device component - NCE-FAN Driver: - Add topology discovery - Multiple bug fixes - Code polishing --- src/device/service/drivers/nce/driver.py | 91 ++++------ .../nce/handlers/NetworkTopologyHandler.py | 162 ++++++++++++++++++ .../service/drivers/nce/handlers/__init__.py | 13 ++ 3 files changed, 209 insertions(+), 57 deletions(-) create mode 100644 src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py create mode 100644 src/device/service/drivers/nce/handlers/__init__.py diff --git a/src/device/service/drivers/nce/driver.py b/src/device/service/drivers/nce/driver.py index cabe17991..c0f487fc2 100644 --- a/src/device/service/drivers/nce/driver.py +++ b/src/device/service/drivers/nce/driver.py @@ -12,30 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json -import logging -import re -import threading +import anytree, copy, json, logging, re, requests, threading from typing import Any, Iterator, List, Optional, Tuple, Union - -import anytree -import requests -from requests.auth import HTTPBasicAuth - from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.tools.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_length, chk_string, chk_type from device.service.driver_api._Driver import _Driver from device.service.driver_api.AnyTreeTools import ( - TreeNode, - dump_subtree, - get_subnode, - set_subnode_value, -) -from device.service.driver_api.ImportTopologyEnum import ( - ImportTopologyEnum, - get_import_topology, + TreeNode, dump_subtree, get_subnode, set_subnode_value, ) - +from .handlers.NetworkTopologyHandler import NetworkTopologyHandler from .Constants import SPECIAL_RESOURCE_MAPPINGS from .nce_fan_client import ( NCEClient, @@ -44,43 +30,38 @@ from .nce_fan_client import ( ) from .Tools import compose_resource_endpoint + LOGGER = logging.getLogger(__name__) RE_NCE_APP_FLOW_DATA = re.compile(r'^\/service\[[^\]]+\]\/AppFlow$') RE_NCE_APP_FLOW_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/AppFlow\/operation$') + DRIVER_NAME = 'nce' METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class NCEDriver(_Driver): - def __init__(self, address: str, port: str, **settings) -> None: - super().__init__(DRIVER_NAME, address, int(port), **settings) + def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() + + restconf_settings = copy.deepcopy(settings) + restconf_settings.pop('base_url', None) + restconf_settings.pop('import_topology', None) + restconf_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient') + self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) + self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_client, **settings) + self.__running = TreeNode('.') scheme = self.settings.get('scheme', 'http') username = self.settings.get('username') password = self.settings.get('password') self.nce = NCEClient( - self.address, - self.port, - scheme=scheme, - username=username, - password=password, - ) - self.__auth = None - # ( - # HTTPBasicAuth(username, password) - # if username is not None and password is not None - # else None - # ) - self.__tfs_nbi_root = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) - self.__timeout = int(self.settings.get('timeout', 
120)) - self.__import_topology = get_import_topology( - self.settings, default=ImportTopologyEnum.DEVICES + self.address, self.port, scheme=scheme, username=username, password=password, ) endpoints = self.settings.get('endpoints', []) endpoint_resources = [] @@ -127,13 +108,14 @@ class NCEDriver(_Driver): def Connect(self) -> bool: with self.__lock: - if self.__started.is_set(): - return True + if self.__started.is_set(): return True try: - ... + self._rest_conf_client._discover_base_url() except requests.exceptions.Timeout: + LOGGER.exception('Timeout exception checking connectivity') return False except Exception: # pylint: disable=broad-except + LOGGER.exception('Unhandled exception checking connectivity') return False else: self.__started.set() @@ -150,28 +132,27 @@ class NCEDriver(_Driver): return [] @metered_subclass_method(METRICS_POOL) - def GetConfig( - self, resource_keys: List[str] = [] - ) -> List[Tuple[str, Union[Any, None, Exception]]]: + def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) + results = list() with self.__lock: if len(resource_keys) == 0: return dump_subtree(self.__running) - results = [] + resolver = anytree.Resolver(pathattr='name') for i, resource_key in enumerate(resource_keys): str_resource_name = 'resource_key[#{:d}]'.format(i) try: chk_string(str_resource_name, resource_key, allow_empty=False) - resource_key = SPECIAL_RESOURCE_MAPPINGS.get(resource_key, resource_key) - resource_path = resource_key.split('/') + if resource_key == RESOURCE_ENDPOINTS: + results.extend(self._handler_net_topology.get()) + else: + resource_key = SPECIAL_RESOURCE_MAPPINGS.get(resource_key, resource_key) + resource_path = resource_key.split('/') except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - 'Exception validating {:s}: {:s}'.format( - str_resource_name, str(resource_key) - ) - ) - results.append((resource_key, e)) # if validation fails, store the exception + MSG = 'Error processing resource_key({:s}, {:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) # if processing fails, store the exception continue resource_node = get_subnode(resolver, self.__running, resource_path, default=None) @@ -185,10 +166,7 @@ class NCEDriver(_Driver): @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] - - if len(resources) == 0: - return results - + if len(resources) == 0: return results with self.__lock: for resource in resources: resource_key, resource_value = resource @@ -224,8 +202,7 @@ class NCEDriver(_Driver): @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] - if len(resources) == 0: - return results + if len(resources) == 0: return results with self.__lock: for resource in resources: LOGGER.info('resource = {:s}'.format(str(resource))) diff --git a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py new file mode 100644 index 000000000..d03f00830 --- /dev/null +++ b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py @@ -0,0 +1,162 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Dict, List, Optional +from common.Constants import DEFAULT_TOPOLOGY_NAME +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ( + DEVICEDRIVER_UNDEFINED, DEVICEOPERATIONALSTATUS_DISABLED, + DEVICEOPERATIONALSTATUS_ENABLED +) +from common.tools.client.RestConfClient import RestConfClient +from device.service.driver_api.ImportTopologyEnum import ( + ImportTopologyEnum, get_import_topology +) + + +LOGGER = logging.getLogger(__name__) + + +class NetworkTopologyHandler: + def __init__(self, rest_conf_client : RestConfClient, **settings) -> None: + self._rest_conf_client = rest_conf_client + self._subpath_root = '/ietf-network:networks' + self._subpath_item = self._subpath_root + '/network={network_id:s}' + + # Options are: + # disabled --> just import endpoints as usual + # devices --> imports sub-devices but not links connecting them. + # (a remotely-controlled transport domain might exist between them) + # topology --> imports sub-devices and links connecting them. + # (not supported by XR driver) + self._import_topology = get_import_topology(settings, default=ImportTopologyEnum.TOPOLOGY) + + + def get(self, network_id : Optional[str] = None) -> List[Dict]: + if network_id is None: network_id = DEFAULT_TOPOLOGY_NAME + endpoint = self._subpath_item.format(network_id=network_id) + reply = self._rest_conf_client.get(endpoint) + + if 'ietf-network:network' not in reply: + raise Exception('Malformed reply. 
"ietf-network:network" missing') + networks = reply['ietf-network:network'] + + if len(networks) == 0: + MSG = '[get] Network({:s}) not found; returning' + LOGGER.debug(MSG.format(str(network_id))) + return list() + + if len(networks) > 1: + MSG = '[get] Multiple occurrences for Network({:s}); returning' + LOGGER.debug(MSG.format(str(network_id))) + return list() + + network = networks[0] + + MSG = '[get] import_topology={:s}' + LOGGER.debug(MSG.format(str(self._import_topology))) + + result = list() + if self._import_topology == ImportTopologyEnum.DISABLED: + LOGGER.debug('[get] abstract controller; returning') + return result + + device_type = DeviceTypeEnum.EMULATED_PACKET_SWITCH.value + endpoint_type = '' + if 'network-types' in network: + nnt = network['network-types'] + if 'ietf-te-topology:te-topology' in nnt: + nnt_tet = nnt['ietf-te-topology:te-topology'] + if 'ietf-otn-topology:otn-topology' in nnt_tet: + device_type = DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value + endpoint_type = 'optical' + elif 'ietf-eth-te-topology:eth-tran-topology' in nnt_tet: + device_type = DeviceTypeEnum.EMULATED_PACKET_SWITCH.value + endpoint_type = 'copper' + elif 'ietf-l3-unicast-topology:l3-unicast-topology' in nnt_tet: + device_type = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value + endpoint_type = 'copper' + + for node in network['node']: + node_id = node['node-id'] + + node_name = node_id + node_is_up = True + if 'ietf-te-topology:te' in node: + nte = node['ietf-te-topology:te'] + + if 'oper-status' in nte: + node_is_up = nte['oper-status'] == 'up' + + if 'te-node-attributes' in nte: + ntea = nte['te-node-attributes'] + if 'name' in ntea: + node_name = ntea['name'] + + device_url = '/devices/device[{:s}]'.format(node_id) + device_data = { + 'uuid': node_id, + 'name': node_name, + 'type': device_type, + 'status': DEVICEOPERATIONALSTATUS_ENABLED if node_is_up else DEVICEOPERATIONALSTATUS_DISABLED, + 'drivers': [DEVICEDRIVER_UNDEFINED], + } + result.append((device_url, device_data)) + + for tp in node['ietf-network-topology:termination-point']: + tp_id = tp['tp-id'] + + tp_name = tp_id + if 'ietf-te-topology:te' in tp: + tpte = tp['ietf-te-topology:te'] + if 'name' in tpte: + tp_name = tpte['name'] + + endpoint_url = '/endpoints/endpoint[{:s}, {:s}]'.format(node_id, tp_id) + endpoint_data = { + 'device_uuid': node_id, + 'uuid': tp_id, + 'name': tp_name, + 'type': endpoint_type, + } + result.append((endpoint_url, endpoint_data)) + + if self._import_topology == ImportTopologyEnum.DEVICES: + LOGGER.debug('[get] devices only; returning') + return result + + for link in network['ietf-network-topology:link']: + link_uuid = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_dev_id = link_src['source-node'] + link_src_ep_id = link_src['source-tp'] + link_dst_dev_id = link_dst['dest-node'] + link_dst_ep_id = link_dst['dest-tp'] + + link_url = '/links/link[{:s}]'.format(link_uuid) + link_endpoint_ids = [ + (link_src_dev_id, link_src_ep_id), + (link_dst_dev_id, link_dst_ep_id), + ] + link_data = { + 'uuid': link_uuid, + 'name': link_uuid, + 'endpoints': link_endpoint_ids, + } + result.append((link_url, link_data)) + + LOGGER.debug('[get] topology; returning') + return result diff --git a/src/device/service/drivers/nce/handlers/__init__.py b/src/device/service/drivers/nce/handlers/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/device/service/drivers/nce/handlers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) 
(https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -- GitLab From 5db967922986996287c1b2da705026aa3c3372f5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 2 Sep 2025 16:00:44 +0000 Subject: [PATCH 091/367] ECOC F5GA Telemetry Demo: - Fixed topology descriptors --- .../ecoc25-f5ga-telemetry/data/topology/topology-agg.json | 2 +- .../ecoc25-f5ga-telemetry/data/topology/topology-e2e.json | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index 7a961d3e7..ed40c65a2 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -16,7 +16,7 @@ "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "ip-sdn-controller", + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "nce", "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json index 81d107031..4cfa080af 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json @@ -13,17 +13,17 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", - "timeout": 120, "verify": false + "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "device_type": "ip-sdn-controller", - "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_NCE"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", - "timeout": 120, "verify": false + "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}} ], -- GitLab From d3e79ddfc297e9c28d987544c27ed3cbd05cddbd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 08:30:31 +0000 Subject: [PATCH 092/367] Device component - L3VPN Driver: - Added support for underlying controller-managed 
devices --- src/device/service/drivers/ietf_l3vpn/TfsApiClient.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index f379d56b8..4f566d487 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -86,6 +86,7 @@ class TfsApiClient(RestApiClient): device_type : str = json_device['device_type'] #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type device_status = json_device['device_operational_status'] + ctrl_uuid : str = json_device['controller_id']['device_uuid']['uuid'] device_url = '/devices/device[{:s}]'.format(device_uuid) device_data = { 'uuid': json_device['device_id']['device_uuid']['uuid'], @@ -97,6 +98,8 @@ class TfsApiClient(RestApiClient): for driver in json_device['device_drivers'] ], } + if ctrl_uuid is not None and len(ctrl_uuid) > 0: + device_data['ctrl_uuid'] = ctrl_uuid result.append((device_url, device_data)) for json_endpoint in json_device['device_endpoints']: -- GitLab From 7201adf6526c99f068274c120ef25ae3eea20b48 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 08:30:43 +0000 Subject: [PATCH 093/367] Device component: - Added support for underlying controller-managed devices --- src/device/service/Tools.py | 84 ++++++++++++++++++++++++++++--------- 1 file changed, 65 insertions(+), 19 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index a62a0d702..b0a120615 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging -from typing import Any, Dict, List, Optional, Tuple, Union +import json, logging, re +from typing import Any, Dict, List, Optional, Set, Tuple, Union from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException @@ -123,6 +123,9 @@ def populate_endpoints( add_mgmt_port = True break + devices_with_mgmt_endpoints : Set[str] = set() + mgmt_links : Set[Tuple[str, str]] = set() + if add_mgmt_port: # add mgmt port to main device device_mgmt_endpoint = device.device_endpoints.add() @@ -133,6 +136,9 @@ def populate_endpoints( device_mgmt_endpoint.name = 'mgmt' device_mgmt_endpoint.endpoint_type = 'mgmt' + devices_with_mgmt_endpoints.add(device_uuid) + devices_with_mgmt_endpoints.add(device_name) + errors : List[str] = list() for resource_data in results_getconfig: if len(resource_data) != 2: @@ -150,9 +156,11 @@ def populate_endpoints( if resource_key.startswith('/devices/device'): # create sub-device _sub_device_uuid = resource_value['uuid'] + _sub_device_name = resource_value['name'] + _sub_device_ctrl = resource_value.get('ctrl_uuid') _sub_device = Device() _sub_device.device_id.device_uuid.uuid = _sub_device_uuid # pylint: disable=no-member - _sub_device.name = resource_value['name'] + _sub_device.name = _sub_device_name _sub_device.device_type = resource_value['type'] _sub_device.device_operational_status = resource_value['status'] @@ -167,32 +175,52 @@ def populate_endpoints( MSG = 'Unsupported drivers definition in sub-device({:s}, {:s})' raise Exception(MSG.format(str(resource_key), str(resource_value))) - # Sub-devices should always have a controller associated. 
- _sub_device.controller_id.device_uuid.uuid = device_uuid + if _sub_device_ctrl is not None: + # Sub-device is managed by an underlying controller + _sub_device.controller_id.device_uuid.uuid = _sub_device_ctrl + else: + # Sub-devices should always have a controller associated. + _sub_device.controller_id.device_uuid.uuid = device_uuid new_sub_devices[_sub_device_uuid] = _sub_device # add mgmt port to sub-device - _sub_device_mgmt_endpoint = _sub_device.device_endpoints.add() # pylint: disable=no-member - _sub_device_mgmt_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME - _sub_device_mgmt_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME - _sub_device_mgmt_endpoint.endpoint_id.device_id.device_uuid.uuid = _sub_device_uuid - _sub_device_mgmt_endpoint.endpoint_id.endpoint_uuid.uuid = 'mgmt' - _sub_device_mgmt_endpoint.name = 'mgmt' - _sub_device_mgmt_endpoint.endpoint_type = 'mgmt' + if ( + _sub_device_uuid not in devices_with_mgmt_endpoints and + _sub_device_name not in devices_with_mgmt_endpoints + ): + _sub_device_mgmt_endpoint = _sub_device.device_endpoints.add() # pylint: disable=no-member + _sub_device_mgmt_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + _sub_device_mgmt_endpoint.endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME + _sub_device_mgmt_endpoint.endpoint_id.device_id.device_uuid.uuid = _sub_device_uuid + _sub_device_mgmt_endpoint.endpoint_id.endpoint_uuid.uuid = 'mgmt' + _sub_device_mgmt_endpoint.name = 'mgmt' + _sub_device_mgmt_endpoint.endpoint_type = 'mgmt' # add mgmt link - _mgmt_link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(device_name, 'mgmt', _sub_device.name, 'mgmt') - _mgmt_link = Link() - _mgmt_link.link_id.link_uuid.uuid = _mgmt_link_uuid # pylint: disable=no-member - _mgmt_link.name = _mgmt_link_uuid - _mgmt_link.link_endpoint_ids.append(device_mgmt_endpoint.endpoint_id) # pylint: disable=no-member - _mgmt_link.link_endpoint_ids.append(_sub_device_mgmt_endpoint.endpoint_id) # pylint: disable=no-member - new_sub_links[_mgmt_link_uuid] = _mgmt_link + if (device_name, _sub_device_name) not in mgmt_links: + _mgmt_link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(device_name, 'mgmt', _sub_device_name, 'mgmt') + _mgmt_link = Link() + _mgmt_link.link_id.link_uuid.uuid = _mgmt_link_uuid # pylint: disable=no-member + _mgmt_link.name = _mgmt_link_uuid + _mgmt_link.link_endpoint_ids.append(device_mgmt_endpoint.endpoint_id) # pylint: disable=no-member + _mgmt_link.link_endpoint_ids.append(_sub_device_mgmt_endpoint.endpoint_id) # pylint: disable=no-member + new_sub_links[_mgmt_link_uuid] = _mgmt_link + + mgmt_links.add((device_name, _sub_device_name)) elif resource_key.startswith('/endpoints/endpoint'): endpoint_uuid = resource_value['uuid'] _device_uuid = resource_value.get('device_uuid') + endpoint_name = resource_value.get('name') + + if endpoint_uuid == 'mgmt' or endpoint_name == 'mgmt': + if _device_uuid is None: + if device_uuid in devices_with_mgmt_endpoints: + continue + else: + if _device_uuid in devices_with_mgmt_endpoints: + continue if _device_uuid is None: # add endpoint to current device @@ -225,7 +253,22 @@ def populate_endpoints( if location is not None: device_endpoint.endpoint_location.MergeFrom(Location(**location)) + if endpoint_uuid == 'mgmt' or endpoint_name == 'mgmt': + if _device_uuid is None: + devices_with_mgmt_endpoints.add(device_uuid) + devices_with_mgmt_endpoints.add(device_name) + else: + devices_with_mgmt_endpoints.add(_device_uuid) + 
devices_with_mgmt_endpoints.add(new_sub_devices[_device_uuid].name) + elif resource_key.startswith('/links/link'): + link_name = resource_value['name'] + mgmt_match = re.match(r'^([^\/]+)\/mgmt\=\=([^\/]+)\/mgmt$', link_name) + if mgmt_match is not None: + # is management link + src_dev_name, dst_dev_name = mgmt_match.groups() + if (src_dev_name, dst_dev_name) in mgmt_links: continue + # create sub-link _sub_link_uuid = resource_value['uuid'] _sub_link = Link() @@ -240,6 +283,9 @@ def populate_endpoints( _sub_link_endpoint_id.device_id.device_uuid.uuid = device_uuid _sub_link_endpoint_id.endpoint_uuid.uuid = endpoint_uuid + if mgmt_match is not None: + mgmt_links.add((src_dev_name, dst_dev_name)) + # ----------Experimental -------------- elif resource_key.startswith('/opticalconfigs/opticalconfig/'): new_optical_configs["new_optical_config"]=resource_value -- GitLab From 8ce6c924c74b73163f85486d7bc19e2a445f2e80 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 08:41:16 +0000 Subject: [PATCH 094/367] Device component - L3VPN Driver: - Fixed underlying controller id retrieval --- src/device/service/drivers/ietf_l3vpn/TfsApiClient.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index 4f566d487..87bb6dc1e 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -86,7 +86,10 @@ class TfsApiClient(RestApiClient): device_type : str = json_device['device_type'] #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type device_status = json_device['device_operational_status'] - ctrl_uuid : str = json_device['controller_id']['device_uuid']['uuid'] + + ctrl_id : Dict[str, Dict] = json_device.get('controller_id', dict()) + ctrl_uuid : Optional[str] = ctrl_id.get('device_uuid', dict()).get('uuid') + device_url = '/devices/device[{:s}]'.format(device_uuid) device_data = { 'uuid': json_device['device_id']['device_uuid']['uuid'], -- GitLab From d98e50670a3b16301bb9c1ee6d8cd92c957cc2ad Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 08:41:33 +0000 Subject: [PATCH 095/367] Device component - NCE Driver: - Fixed URL composition --- src/device/service/drivers/nce/nce_fan_client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/nce/nce_fan_client.py b/src/device/service/drivers/nce/nce_fan_client.py index 91e2001e3..eac69e2b1 100644 --- a/src/device/service/drivers/nce/nce_fan_client.py +++ b/src/device/service/drivers/nce/nce_fan_client.py @@ -87,12 +87,12 @@ class NCEClient: def __init__( self, address: str, - port: int, + port: str, scheme: str = 'http', username: Optional[str] = None, password: Optional[str] = None, ) -> None: - self._nce_fan_url = NCE_FAN_URL.format(scheme, address, port) + self._nce_fan_url = NCE_FAN_URL.format(scheme, address, int(port)) self._auth = None def create_app_flow(self, app_flow_data: dict) -> None: -- GitLab From f7948cf304237c2733f99e374ccb5030614057e6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 08:53:35 +0000 Subject: [PATCH 096/367] Device component - L3VPN Driver: - Code cleanup --- src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 79219e895..3bc94e18a 100644 
--- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -13,7 +13,7 @@ # limitations under the License. -import anytree, json, logging, re, requests, threading +import anytree, json, logging, re, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_length, chk_string, chk_type -- GitLab From 347e73829a2ac900b808b50230b2fe329e6fc381 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 08:53:51 +0000 Subject: [PATCH 097/367] Device component - NCE Driver: - Fixed imports --- src/device/service/drivers/nce/driver.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/device/service/drivers/nce/driver.py b/src/device/service/drivers/nce/driver.py index c0f487fc2..94c58370a 100644 --- a/src/device/service/drivers/nce/driver.py +++ b/src/device/service/drivers/nce/driver.py @@ -17,7 +17,7 @@ from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.tools.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_length, chk_string, chk_type -from device.service.driver_api._Driver import _Driver +from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from device.service.driver_api.AnyTreeTools import ( TreeNode, dump_subtree, get_subnode, set_subnode_value, ) @@ -31,6 +31,12 @@ from .nce_fan_client import ( from .Tools import compose_resource_endpoint +ALL_RESOURCE_KEYS = [ + RESOURCE_ENDPOINTS, + RESOURCE_SERVICES, +] + + LOGGER = logging.getLogger(__name__) -- GitLab From 87c9a247d6c2c15db6b1235cae65dc4e327c8e9e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 09:03:18 +0000 Subject: [PATCH 098/367] Device component - NCE Driver: - Fixed data retrieval --- src/device/service/drivers/nce/driver.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/device/service/drivers/nce/driver.py b/src/device/service/drivers/nce/driver.py index 94c58370a..009b373ca 100644 --- a/src/device/service/drivers/nce/driver.py +++ b/src/device/service/drivers/nce/driver.py @@ -155,17 +155,15 @@ class NCEDriver(_Driver): else: resource_key = SPECIAL_RESOURCE_MAPPINGS.get(resource_key, resource_key) resource_path = resource_key.split('/') + resource_node = get_subnode(resolver, self.__running, resource_path, default=None) + # if not found, resource_node is None + if resource_node is None: continue + results.extend(dump_subtree(resource_node)) except Exception as e: # pylint: disable=broad-except MSG = 'Error processing resource_key({:s}, {:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) # if processing fails, store the exception continue - - resource_node = get_subnode(resolver, self.__running, resource_path, default=None) - # if not found, resource_node is None - if resource_node is None: - continue - results.extend(dump_subtree(resource_node)) return results return results -- GitLab From 765a22d502e132afb7cee8a5d0feff00d22cf8c5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 09:49:09 +0000 Subject: [PATCH 099/367] Device component: - Implemented Topological Sort of sub_devices to prevent inserting a sub-device before its controller in multi-controller hierarchies --- 
.../service/DeviceServiceServicerImpl.py | 10 ++++---- src/device/service/Tools.py | 23 +++++++++++++++++-- 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 2ea57c7cc..b6a10d4be 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -13,7 +13,7 @@ # limitations under the License. import grpc, logging, os, time -from typing import Dict +from typing import Dict, List from prometheus_client import Histogram from common.Constants import ServiceNameEnum from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, get_env_var_name @@ -109,6 +109,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): # (which controller is in charge of which sub-device). new_sub_devices : Dict[str, Device] = dict() new_sub_links : Dict[str, Link] = dict() + sorted_sub_device_uuids : List[str] = list() #----- Experimental ------------ new_optical_configs : Dict[str, OpticalConfig] = dict() @@ -117,8 +118,8 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): t5 = time.time() # created from request, populate endpoints using driver errors.extend(populate_endpoints( - device, driver, self.monitoring_loops, new_sub_devices, new_sub_links, - new_optical_configs + device, driver, self.monitoring_loops, new_sub_devices, sorted_sub_device_uuids, + new_sub_links, new_optical_configs )) t6 = time.time() t_pop_endpoints = t6 - t5 @@ -165,7 +166,8 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): t10 = time.time() - for sub_device in new_sub_devices.values(): + for sub_device_uuid in sorted_sub_device_uuids: + sub_device = new_sub_devices[sub_device_uuid] context_client.SetDevice(sub_device) t11 = time.time() diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index b0a120615..cb6a2d515 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -13,6 +13,7 @@ # limitations under the License. import json, logging, re +from graphlib import TopologicalSorter, CycleError from typing import Any, Dict, List, Optional, Set, Tuple, Union from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum @@ -104,8 +105,8 @@ def get_device_controller_uuid(device : Device) -> Optional[str]: def populate_endpoints( device : Device, driver : _Driver, monitoring_loops : MonitoringLoops, - new_sub_devices : Dict[str, Device], new_sub_links : Dict[str, Link], - new_optical_configs : Dict[str, OpticalConfig] + new_sub_devices : Dict[str, Device], sorted_sub_device_uuids : List[str], + new_sub_links : Dict[str, Link], new_optical_configs : Dict[str, OpticalConfig] ) -> List[str]: device_uuid = device.device_id.device_uuid.uuid device_name = device.name @@ -293,6 +294,24 @@ def populate_endpoints( errors.append(ERROR_UNSUP_RESOURCE.format(device_uuid=device_uuid, resource_data=str(resource_data))) continue + # Topologically sort new_sub_devices so that controllers (those referenced by other + # devices via their controller_id) come before the devices that depend on them. 
+ graph : Dict[str, Set[str]] = dict() + for dev_uuid, sub_device in new_sub_devices.items(): + predecesors = graph.setdefault(dev_uuid, set()) + ctrl_uuid = get_device_controller_uuid(sub_device) + if ctrl_uuid is None: continue + predecesors.add(ctrl_uuid) + + try: + ts = TopologicalSorter(graph) + sorted_sub_device_uuids.extend(list(ts.static_order())) + except CycleError: + MSG = 'Topological sort failed due to cycle among sub-devices({:s}), graph({:s})' + msg = MSG.format(str(new_sub_devices), str(graph)) + LOGGER.exception(msg) + errors.append(msg) + return errors def populate_endpoint_monitoring_resources(device_with_uuids : Device, monitoring_loops : MonitoringLoops) -> None: -- GitLab From f3a34144d5e6e0258ec3967d1b8ff69a1273a5b3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 10:31:56 +0000 Subject: [PATCH 100/367] Device component: - Fix Topological Sort of sub_devices --- src/device/service/Tools.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index cb6a2d515..f1d241577 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -300,7 +300,8 @@ def populate_endpoints( for dev_uuid, sub_device in new_sub_devices.items(): predecesors = graph.setdefault(dev_uuid, set()) ctrl_uuid = get_device_controller_uuid(sub_device) - if ctrl_uuid is None: continue + if ctrl_uuid is None: continue # sub_device has no controller + if ctrl_uuid == device_uuid: continue # current device has no dependencies predecesors.add(ctrl_uuid) try: -- GitLab From 6884fff3ffb0d10f36e50adee2f8bfe87590c8f5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 10:38:39 +0000 Subject: [PATCH 101/367] Device component: - Fix Topological Sort of sub_devices --- src/device/service/Tools.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index f1d241577..81d509d93 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -298,6 +298,7 @@ def populate_endpoints( # devices via their controller_id) come before the devices that depend on them. graph : Dict[str, Set[str]] = dict() for dev_uuid, sub_device in new_sub_devices.items(): + if dev_uuid == device_uuid: continue # current device has no dependencies predecesors = graph.setdefault(dev_uuid, set()) ctrl_uuid = get_device_controller_uuid(sub_device) if ctrl_uuid is None: continue # sub_device has no controller -- GitLab From 9a60550658601bb75217f951dcd3f2b84ce5a17e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 10:48:22 +0000 Subject: [PATCH 102/367] Device component: - Fix Topological Sort of sub_devices --- src/device/service/Tools.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 81d509d93..8a23c3388 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -296,15 +296,21 @@ def populate_endpoints( # Topologically sort new_sub_devices so that controllers (those referenced by other # devices via their controller_id) come before the devices that depend on them. 
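# Illustrative sketch (not part of the patch): the ordering the TopologicalSorter
# above is expected to produce. Device names are hypothetical examples; the graph
# maps each sub-device to the set of its predecessors (its controller), mirroring
# what get_device_controller_uuid() yields for each new sub-device.
from graphlib import TopologicalSorter

example_graph = {
    'OLT'  : {'NCE-FAN'},   # OLT reports NCE-FAN as its controller
    'ONT1' : {'NCE-FAN'},   # ONT1 reports NCE-FAN as its controller
}
example_order = list(TopologicalSorter(example_graph).static_order())
# static_order() returns predecessors first, e.g. ['NCE-FAN', 'OLT', 'ONT1'],
# so a controller is written to Context before the sub-devices that reference it.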
+ + LOGGER.info('new_sub_devices.keys={:s}'.format(str(new_sub_devices.keys()))) graph : Dict[str, Set[str]] = dict() for dev_uuid, sub_device in new_sub_devices.items(): - if dev_uuid == device_uuid: continue # current device has no dependencies - predecesors = graph.setdefault(dev_uuid, set()) ctrl_uuid = get_device_controller_uuid(sub_device) + LOGGER.info('dev_uuid={:s}, ctrl_uuid={:s}'.format(str(dev_uuid), str(ctrl_uuid))) + + if dev_uuid == device_uuid: continue # current device has no dependencies if ctrl_uuid is None: continue # sub_device has no controller if ctrl_uuid == device_uuid: continue # current device has no dependencies + predecesors = graph.setdefault(dev_uuid, set()) predecesors.add(ctrl_uuid) + LOGGER.info('graph={:s}'.format(str(graph))) + try: ts = TopologicalSorter(graph) sorted_sub_device_uuids.extend(list(ts.static_order())) @@ -314,6 +320,8 @@ def populate_endpoints( LOGGER.exception(msg) errors.append(msg) + LOGGER.info('sorted_sub_device_uuids={:s}'.format(str(sorted_sub_device_uuids))) + return errors def populate_endpoint_monitoring_resources(device_with_uuids : Device, monitoring_loops : MonitoringLoops) -> None: -- GitLab From d26e263be2781d97586b2f209bb77110fb309dc4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 11:09:07 +0000 Subject: [PATCH 103/367] Device component: - Fix Topological Sort of sub_devices --- src/device/service/Tools.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 8a23c3388..ca73cd9bc 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -302,18 +302,18 @@ def populate_endpoints( for dev_uuid, sub_device in new_sub_devices.items(): ctrl_uuid = get_device_controller_uuid(sub_device) LOGGER.info('dev_uuid={:s}, ctrl_uuid={:s}'.format(str(dev_uuid), str(ctrl_uuid))) - - if dev_uuid == device_uuid: continue # current device has no dependencies if ctrl_uuid is None: continue # sub_device has no controller - if ctrl_uuid == device_uuid: continue # current device has no dependencies predecesors = graph.setdefault(dev_uuid, set()) + LOGGER.info('dev_uuid={:s} predecesors={:s}'.format(str(dev_uuid), str(predecesors))) predecesors.add(ctrl_uuid) + LOGGER.info('graph={:s}'.format(str(graph))) - LOGGER.info('graph={:s}'.format(str(graph))) + LOGGER.info('final graph={:s}'.format(str(graph))) try: ts = TopologicalSorter(graph) sorted_sub_device_uuids.extend(list(ts.static_order())) + sorted_sub_device_uuids.remove(device_uuid) except CycleError: MSG = 'Topological sort failed due to cycle among sub-devices({:s}), graph({:s})' msg = MSG.format(str(new_sub_devices), str(graph)) -- GitLab From da6d2d9247db8918d8ee4b723f6f78a565f918c7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 11:18:13 +0000 Subject: [PATCH 104/367] Device component: - Fix Topological Sort of sub_devices --- src/device/service/Tools.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index ca73cd9bc..8da4047cc 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -277,12 +277,12 @@ def populate_endpoints( _sub_link.name = resource_value['name'] new_sub_links[_sub_link_uuid] = _sub_link - for device_uuid,endpoint_uuid in resource_value['endpoints']: + for _device_uuid,_endpoint_uuid in resource_value['endpoints']: _sub_link_endpoint_id = _sub_link.link_endpoint_ids.add() # pylint: disable=no-member 
_sub_link_endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME _sub_link_endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME - _sub_link_endpoint_id.device_id.device_uuid.uuid = device_uuid - _sub_link_endpoint_id.endpoint_uuid.uuid = endpoint_uuid + _sub_link_endpoint_id.device_id.device_uuid.uuid = _device_uuid + _sub_link_endpoint_id.endpoint_uuid.uuid = _endpoint_uuid if mgmt_match is not None: mgmt_links.add((src_dev_name, dst_dev_name)) -- GitLab From 218bdfd1fb105d30fa175ffe2c3f101c94b9271b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 11:19:18 +0000 Subject: [PATCH 105/367] Device component: - Fix Topological Sort of sub_devices --- src/device/service/Tools.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 8da4047cc..d2eaaa8e6 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -313,7 +313,8 @@ def populate_endpoints( try: ts = TopologicalSorter(graph) sorted_sub_device_uuids.extend(list(ts.static_order())) - sorted_sub_device_uuids.remove(device_uuid) + if device_uuid in sorted_sub_device_uuids: + sorted_sub_device_uuids.remove(device_uuid) except CycleError: MSG = 'Topological sort failed due to cycle among sub-devices({:s}), graph({:s})' msg = MSG.format(str(new_sub_devices), str(graph)) -- GitLab From c34bfe37e18314e151230960ef62d620687f3fe4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 12:52:37 +0000 Subject: [PATCH 106/367] Device component: - Fix handling of underlying controller-managed devices --- src/device/service/Tools.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index d2eaaa8e6..6b2bb0b12 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -198,6 +198,9 @@ def populate_endpoints( _sub_device_mgmt_endpoint.name = 'mgmt' _sub_device_mgmt_endpoint.endpoint_type = 'mgmt' + devices_with_mgmt_endpoints.add(_sub_device_uuid) + devices_with_mgmt_endpoints.add(_sub_device_name) + # add mgmt link if (device_name, _sub_device_name) not in mgmt_links: _mgmt_link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(device_name, 'mgmt', _sub_device_name, 'mgmt') -- GitLab From c61dae753eb0e04ca5dbd3c4d7571b8386408a22 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 13:14:29 +0000 Subject: [PATCH 107/367] ECOC F5GA Telemetry Demo: - Fixed topology descriptors --- .../data/topology/topology-agg.json | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index ed40c65a2..b3d7ad444 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -63,25 +63,25 @@ ]}} ], "links": [ - {"link_id": {"link_uuid": {"uuid": "OLT-PPE1"}}, "link_type" : "LINKTYPE_COPPER", + {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} ]}, - {"link_id": {"link_uuid": {"uuid": "OLT-OPE1"}}, "link_type" : "LINKTYPE_COPPER", + {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : 
"LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} ]}, - {"link_id": {"link_uuid": {"uuid": "PPE2-POP2"}}, "link_type" : "LINKTYPE_COPPER", + {"link_id": {"link_uuid": {"uuid": "L13"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} ]}, - {"link_id": {"link_uuid": {"uuid": "OPE2-POP2"}}, "link_type" : "LINKTYPE_COPPER", + {"link_id": {"link_uuid": {"uuid": "L14"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "O-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} ]} ] } -- GitLab From a433e23e199a4062500e681a9e42b008ff7028b5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 13:31:28 +0000 Subject: [PATCH 108/367] ECOC F5GA Telemetry Demo: - Fixed topology descriptors - Added network slice descriptors - Copied old-numbered files to separate folder --- .../network-slice1.json} | 0 .../network-slice2.json} | 0 .../data/slices/network-slice1.json | 118 ++++++++++++++++++ .../data/slices/network-slice2.json | 118 ++++++++++++++++++ .../data/topology/topology-agg.json | 4 +- 5 files changed, 238 insertions(+), 2 deletions(-) rename src/tests/ecoc25-f5ga-telemetry/data/{slices/network_slice1.json => old-numbered/network-slice1.json} (100%) rename src/tests/ecoc25-f5ga-telemetry/data/{slices/network_slice2.json => old-numbered/network-slice2.json} (100%) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/network-slice1.json similarity index 100% rename from src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice1.json rename to src/tests/ecoc25-f5ga-telemetry/data/old-numbered/network-slice1.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/old-numbered/network-slice2.json similarity index 100% rename from src/tests/ecoc25-f5ga-telemetry/data/slices/network_slice2.json rename to src/tests/ecoc25-f5ga-telemetry/data/old-numbered/network-slice2.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json new file mode 100644 index 000000000..121e20de5 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "slice1", + "description": "network slice 1, PC1-VM1", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ 
+ "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VMa", + "description": "AC POP2 connected to VMa", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json new file mode 100644 index 000000000..2d10b9693 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "slice1", + "description": "network slice 1, PC1-VM1", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": 
"ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP1", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP1 to VMb", + "description": "AC POP1 connected to VMb", + "ac-node-id": "POP1", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index b3d7ad444..b7a882239 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -76,12 +76,12 @@ {"link_id": {"link_uuid": {"uuid": "L13"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} ]}, {"link_id": {"link_uuid": {"uuid": "L14"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "O-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, - {"device_id": 
{"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} + {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} ]} ] } -- GitLab From ce56069dd27b5d24dd75f870ef202cb7bc63a28a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 13:45:44 +0000 Subject: [PATCH 109/367] WebUI component: - Permanently show labels of nodes in topology --- src/webui/service/templates/js/topology.js | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/webui/service/templates/js/topology.js b/src/webui/service/templates/js/topology.js index 13fdc7316..dcbbc51be 100644 --- a/src/webui/service/templates/js/topology.js +++ b/src/webui/service/templates/js/topology.js @@ -51,7 +51,7 @@ const svg = d3.select('#topology') ; // svg objects -var link, node, optical_link; +var link, node, optical_link, labels; // values for all forces forceProperties = { @@ -115,7 +115,18 @@ d3.json("{{ url_for('main.topology') }}", function(data) { .call(d3.drag().on("start", dragstarted).on("drag", dragged).on("end", dragended)); // node tooltip - node.append("title").text(function(n) { return n.name; }); + //node.append("title").text(function(n) { return n.name; }); + // persistent node labels + labels = svg.append("g").attr("class", "labels") + .selectAll("text") + .data(data.devices) + .enter() + .append("text") + .text(function(d) { return d.name; }) + .attr("font-family", "sans-serif") + .attr("font-size", 12) + .attr("text-anchor", "middle") + .attr("pointer-events", "none"); // link tooltip link.append("title").text(function(l) { return l.name; }); // optical link tooltip @@ -183,6 +194,11 @@ function ticked() { node .attr('x', function(d) { return d.x-icon_width/2; }) .attr('y', function(d) { return d.y-icon_height/2; }); + if (labels) { + labels + .attr('x', function(d) { return d.x; }) + .attr('y', function(d) { return d.y + icon_height/2 + 12; }); + } } /******************** UI EVENTS ********************/ -- GitLab From 4ef1dc0f2a23a7f156db62b9110b47b139b55b99 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 13:47:27 +0000 Subject: [PATCH 110/367] ECOC F5GA Telemetry Demo: - Add provisioning script for slice1 --- .../ecoc25-f5ga-telemetry/provision-slice1.sh | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh new file mode 100644 index 000000000..de9598da2 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Provisioning slice1..." +curl -X POST -d @data/slices/network-slice1.json http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + +echo "Done!" 
-- GitLab From 222d93ff0b02ad02b02ee2f5d0e5e4995d96d001 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 13:49:51 +0000 Subject: [PATCH 111/367] ECOC F5GA Telemetry Demo: - Add provisioning script for slice2 - Fixed permissions of script --- .../ecoc25-f5ga-telemetry/provision-slice1.sh | 4 +++ .../ecoc25-f5ga-telemetry/provision-slice2.sh | 25 +++++++++++++++++++ 2 files changed, 29 insertions(+) mode change 100644 => 100755 src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh old mode 100644 new mode 100755 index de9598da2..56a808241 --- a/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh +++ b/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh @@ -14,6 +14,10 @@ # limitations under the License. +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + echo "[E2E] Provisioning slice1..." curl -X POST -d @data/slices/network-slice1.json http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services echo diff --git a/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh b/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh new file mode 100755 index 000000000..9808dd0a6 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice2..." +curl -X POST -d @data/slices/network-slice2.json http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + +echo "Done!" -- GitLab From b6d0c688aabf392b77f5841560bbaf9a0a0cb6a3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 14:15:36 +0000 Subject: [PATCH 112/367] ECOC F5GA Telemetry Demo: - Fixed provisioning scripts --- src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh | 5 ++++- src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh index 56a808241..fba21415c 100755 --- a/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh +++ b/src/tests/ecoc25-f5ga-telemetry/provision-slice1.sh @@ -19,7 +19,10 @@ cd $(dirname $0) echo "[E2E] Provisioning slice1..." -curl -X POST -d @data/slices/network-slice1.json http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice1.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services echo + echo "Done!" 
diff --git a/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh b/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh index 9808dd0a6..bd1e9a73c 100755 --- a/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh +++ b/src/tests/ecoc25-f5ga-telemetry/provision-slice2.sh @@ -19,7 +19,10 @@ cd $(dirname $0) echo "[E2E] Provisioning slice2..." -curl -X POST -d @data/slices/network-slice2.json http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice2.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services echo + echo "Done!" -- GitLab From 530c51ba602681a1d7f37df7d3a379b231aacaa4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 14:28:58 +0000 Subject: [PATCH 113/367] Device component: - Fix handling of underlying controller-managed devices --- src/device/service/Tools.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 6b2bb0b12..93dcb5a93 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -202,7 +202,10 @@ def populate_endpoints( devices_with_mgmt_endpoints.add(_sub_device_name) # add mgmt link - if (device_name, _sub_device_name) not in mgmt_links: + if ( + _sub_device_ctrl is None and + (device_name, _sub_device_name) not in mgmt_links + ): _mgmt_link_uuid = '{:s}/{:s}=={:s}/{:s}'.format(device_name, 'mgmt', _sub_device_name, 'mgmt') _mgmt_link = Link() _mgmt_link.link_id.link_uuid.uuid = _mgmt_link_uuid # pylint: disable=no-member -- GitLab From a02136f1605181d29aa51ece4d9a2a8c3ff04228 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 15:29:35 +0000 Subject: [PATCH 114/367] Device component: - Code cleanup --- src/device/service/Tools.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 93dcb5a93..353f3fa65 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -303,18 +303,12 @@ def populate_endpoints( # Topologically sort new_sub_devices so that controllers (those referenced by other # devices via their controller_id) come before the devices that depend on them. 
- LOGGER.info('new_sub_devices.keys={:s}'.format(str(new_sub_devices.keys()))) graph : Dict[str, Set[str]] = dict() for dev_uuid, sub_device in new_sub_devices.items(): ctrl_uuid = get_device_controller_uuid(sub_device) - LOGGER.info('dev_uuid={:s}, ctrl_uuid={:s}'.format(str(dev_uuid), str(ctrl_uuid))) if ctrl_uuid is None: continue # sub_device has no controller predecesors = graph.setdefault(dev_uuid, set()) - LOGGER.info('dev_uuid={:s} predecesors={:s}'.format(str(dev_uuid), str(predecesors))) predecesors.add(ctrl_uuid) - LOGGER.info('graph={:s}'.format(str(graph))) - - LOGGER.info('final graph={:s}'.format(str(graph))) try: ts = TopologicalSorter(graph) @@ -327,8 +321,6 @@ def populate_endpoints( LOGGER.exception(msg) errors.append(msg) - LOGGER.info('sorted_sub_device_uuids={:s}'.format(str(sorted_sub_device_uuids))) - return errors def populate_endpoint_monitoring_resources(device_with_uuids : Device, monitoring_loops : MonitoringLoops) -> None: -- GitLab From 90bf2f34d6a415b242e9ef1bf10d2ba104c35947 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 15:31:00 +0000 Subject: [PATCH 115/367] NBI component- IETF Slice: - Corrected metric bound handling - Disabled code to replace ONT endpoint by Emu-DC, not clear why it is done --- .../ietf_network_slice/ietf_slice_handler.py | 70 ++++++++++++++++--- 1 file changed, 59 insertions(+), 11 deletions(-) diff --git a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py index 149d0cfa8..b4b32cd3d 100644 --- a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py +++ b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py @@ -103,19 +103,65 @@ def build_constraints_from_connection_group(connection_group: dict) -> List[Cons ]["slo-policy"]["metric-bound"] for metric in metric_bounds: - metric_type = metric["metric-type"] - if metric_type == "ietf-nss:one-way-delay-maximum": - bound_value = float(metric["bound"]) + metric_type = str(metric['metric-type']) + metric_type = metric_type.replace('ietf-network-slice-service:', 'ietf-nss:') + + if metric_type == 'ietf-nss:one-way-delay-maximum': + value = float(metric['bound']) + unit = str(metric['metric-unit']) + + if unit == 'nanoseconds': + factor = 1.0e6 + elif unit == 'microseconds': + factor = 1.0e3 + elif unit == 'milliseconds': + factor = 1.0 + else: + MSG = 'Unsupported unit({:s}) for metric({:s})' + raise Exception(MSG.format(unit, metric_type)) + + constraint = Constraint() + constraint.sla_latency.e2e_latency_ms = value / factor + constraints.append(constraint) + + elif metric_type == 'ietf-nss:one-way-bandwidth': + value = float(metric['bound']) + unit = str(metric['metric-unit']) + + if unit == 'bps': + factor = 1.0e9 + elif unit == 'Kbps': + factor = 1.0e6 + elif unit == 'Mbps': + factor = 1.0e3 + elif unit == 'Gbps': + factor = 1.0 + else: + MSG = 'Unsupported unit({:s}) for metric({:s})' + raise Exception(MSG.format(unit, metric_type)) + constraint = Constraint() - constraint.sla_latency.e2e_latency_ms = bound_value + constraint.sla_capacity.capacity_gbps = value / factor constraints.append(constraint) - elif metric_type == "ietf-nss:one-way-bandwidth": - bound_value = float(metric["bound"]) + + elif metric_type == "ietf-nss:two-way-packet-loss": + value = float(metric["percentile-value"]) + unit = str(metric['metric-unit']) + + if unit != 'percentage': + MSG = 'Unsupported unit({:s}) for metric({:s})' + raise Exception(MSG.format(unit, metric_type)) + constraint = Constraint() - # Convert from Mbps to 
Gbps if needed - constraint.sla_capacity.capacity_gbps = bound_value / 1.0e3 + constraint.sla_availability.num_disjoint_paths = 1 + constraint.sla_availability.all_active = True + constraint.sla_availability.availability = 100.0 - value constraints.append(constraint) + else: + MSG = 'Unsupported metric({:s})' + raise Exception(MSG.format(str(metric))) + return constraints @@ -335,9 +381,11 @@ class IETFSliceHandler: # Sort endpoints and optionally replace the ONT endpoint list_endpoints = sort_endpoints(list_endpoints, sdps, found_cg, context_client) - list_endpoints = replace_ont_endpoint_with_emu_dc( - list_endpoints, context_client - ) + + # NOTE: not sure why this is needed + #list_endpoints = replace_ont_endpoint_with_emu_dc( + # list_endpoints, context_client + #) slice_request.slice_endpoint_ids.extend(list_endpoints) slice_request.slice_constraints.extend(list_constraints) -- GitLab From 9e78473a248fd0fdfd506d386e21e0fcf8f733d5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 15:31:21 +0000 Subject: [PATCH 116/367] Manifests: - Increased log level to DEBUG for NBI, PathComp, Service, Slice --- manifests/nbiservice.yaml | 2 +- manifests/pathcompservice.yaml | 2 +- manifests/serviceservice.yaml | 2 +- manifests/sliceservice.yaml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index cac267495..ec6db58b7 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -39,7 +39,7 @@ spec: #- containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: FLASK_ENV value: "production" # normal value is "production", change to "development" if developing - name: IETF_NETWORK_RENDERER diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 2db0d41b0..71c7e4cd7 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: ENABLE_FORECASTER value: "NO" readinessProbe: diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 8262550ef..8615e8879 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index 1df4797b7..a05798f0a 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: SLICE_GROUPING value: "DISABLE" envFrom: -- GitLab From 78eb3b3db25bb9b61f390a7e2605179d1050a3a7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 16:14:06 +0000 Subject: [PATCH 117/367] PathComp component - FrontEnd: - Fixed sub-service composition --- .../algorithms/tools/ComputeSubServices.py | 10 +++++++--- .../service/algorithms/tools/ServiceTypes.py | 17 ++++++++++++++--- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index 3b8e4bbb1..5a6d6aa39 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -114,7 +114,10 @@ def 
convert_explicit_path_hops_to_connections( elif prv_res_class[2] is None and res_class[2] is not None: # entering domain of a device controller, create underlying connection LOGGER.debug(' entering domain of a device controller, create underlying connection') - prv_service_type = connection_stack.queue[-1].service_type + if len(connection_stack.queue) > 0: + prv_service_type = connection_stack.queue[-1].service_type + else: + prv_service_type = None service_type = get_service_type(res_class[1], prv_service_type) connection_entry = ConnectionEntry(service_type=service_type, path_hops=[path_hop]) connection_stack.put(connection_entry) @@ -123,8 +126,9 @@ def convert_explicit_path_hops_to_connections( LOGGER.debug(' leaving domain of a device controller, terminate underlying connection') connection = connection_stack.get() connections.append(connection) - connection_stack.queue[-1].dependencies.append(connection) - connection_stack.queue[-1].path_hops.append(path_hop) + if len(connection_stack.queue) > 0: + connection_stack.queue[-1].dependencies.append(connection) + connection_stack.queue[-1].path_hops.append(path_hop) elif prv_res_class[2] is not None and res_class[2] is not None: if prv_res_class[2] == res_class[2]: # stay in domain of a device controller, connection continues diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index 2f55db0c6..75025f25c 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -13,6 +13,7 @@ # limitations under the License. +from typing import Optional from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ServiceTypeEnum @@ -47,11 +48,21 @@ SERVICE_TYPE_L3NM = {ServiceTypeEnum.SERVICETYPE_L3NM} SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM} SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE} -def get_service_type(device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum) -> ServiceTypeEnum: - if device_type in PACKET_DEVICE_TYPES and prv_service_type in SERVICE_TYPE_LXNM: return prv_service_type +def get_service_type( + device_type : DeviceTypeEnum, prv_service_type : Optional[ServiceTypeEnum] = None +) -> ServiceTypeEnum: + if device_type is DeviceTypeEnum.NCE: return ServiceTypeEnum.SERVICETYPE_L3NM + if ( + device_type in PACKET_DEVICE_TYPES and + prv_service_type is not None and + prv_service_type in SERVICE_TYPE_LXNM + ): return prv_service_type if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE - if device_type in NETWORK_DEVICE_TYPES: return prv_service_type + if ( + device_type in NETWORK_DEVICE_TYPES and + prv_service_type is not None + ): return prv_service_type str_fields = ', '.join([ 'device_type={:s}'.format(str(device_type)), -- GitLab From f6eb01b4f4e940e81741bc1cef82e62169dcde72 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 16:18:23 +0000 Subject: [PATCH 118/367] PathComp component - FrontEnd: - Fixed sub-service composition --- src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index 75025f25c..6d80ea322 100644 --- 
a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -49,7 +49,7 @@ SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETY SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE} def get_service_type( - device_type : DeviceTypeEnum, prv_service_type : Optional[ServiceTypeEnum] = None + device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum = None ) -> ServiceTypeEnum: if device_type is DeviceTypeEnum.NCE: return ServiceTypeEnum.SERVICETYPE_L3NM if ( -- GitLab From 3d55a0e5c7c337f378cdb5b2b0b6280699ec8c57 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 16:31:43 +0000 Subject: [PATCH 119/367] PathComp component - FrontEnd: - Fixed sub-service composition --- .../service/algorithms/tools/ComputeSubServices.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index 5a6d6aa39..30ecf8c57 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -114,10 +114,12 @@ def convert_explicit_path_hops_to_connections( elif prv_res_class[2] is None and res_class[2] is not None: # entering domain of a device controller, create underlying connection LOGGER.debug(' entering domain of a device controller, create underlying connection') + if len(connection_stack.queue) > 0: prv_service_type = connection_stack.queue[-1].service_type else: prv_service_type = None + service_type = get_service_type(res_class[1], prv_service_type) connection_entry = ConnectionEntry(service_type=service_type, path_hops=[path_hop]) connection_stack.put(connection_entry) @@ -126,6 +128,7 @@ def convert_explicit_path_hops_to_connections( LOGGER.debug(' leaving domain of a device controller, terminate underlying connection') connection = connection_stack.get() connections.append(connection) + if len(connection_stack.queue) > 0: connection_stack.queue[-1].dependencies.append(connection) connection_stack.queue[-1].path_hops.append(path_hop) @@ -139,9 +142,15 @@ def convert_explicit_path_hops_to_connections( LOGGER.debug(' switching to different device controller, chain connections') connection = connection_stack.get() connections.append(connection) - connection_stack.queue[-1].dependencies.append(connection) - prv_service_type = connection_stack.queue[-1].service_type + if len(connection_stack.queue) > 0: + connection_stack.queue[-1].dependencies.append(connection) + + if len(connection_stack.queue) > 0: + prv_service_type = connection_stack.queue[-1].service_type + else: + prv_service_type = None + service_type = get_service_type(res_class[1], prv_service_type) connection_entry = ConnectionEntry(service_type=service_type, path_hops=[path_hop]) connection_stack.put(connection_entry) -- GitLab From 308c8457a15670286e1fd47ad889c4d764a7da32 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 16:41:56 +0000 Subject: [PATCH 120/367] PathComp component - FrontEnd: - Fixed sub-service composition --- src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index 6d80ea322..da63bfd04 100644 --- 
a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -52,6 +52,7 @@ def get_service_type( device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum = None ) -> ServiceTypeEnum: if device_type is DeviceTypeEnum.NCE: return ServiceTypeEnum.SERVICETYPE_L3NM + if device_type is DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: return ServiceTypeEnum.SERVICETYPE_L3NM if ( device_type in PACKET_DEVICE_TYPES and prv_service_type is not None and -- GitLab From 3e8ae19b79285afc2140d8a91773dcdabed835f5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 16:59:18 +0000 Subject: [PATCH 121/367] PathComp component - FrontEnd: - Fixed sub-service composition --- .../algorithms/tools/ComputeSubServices.py | 35 ++++++++++--------- .../service/algorithms/tools/ServiceTypes.py | 8 ++--- 2 files changed, 21 insertions(+), 22 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index 30ecf8c57..b92ccaf34 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -115,11 +115,14 @@ def convert_explicit_path_hops_to_connections( # entering domain of a device controller, create underlying connection LOGGER.debug(' entering domain of a device controller, create underlying connection') - if len(connection_stack.queue) > 0: - prv_service_type = connection_stack.queue[-1].service_type - else: - prv_service_type = None - + if len(connection_stack.queue) == 0: + LOGGER.debug(' synthetic path ingress') + connection_entry = ConnectionEntry( + uuid=main_service_uuid, service_type=main_service_type, path_hops=[] + ) + connection_stack.put(connection_entry) + + prv_service_type = connection_stack.queue[-1].service_type service_type = get_service_type(res_class[1], prv_service_type) connection_entry = ConnectionEntry(service_type=service_type, path_hops=[path_hop]) connection_stack.put(connection_entry) @@ -129,9 +132,8 @@ def convert_explicit_path_hops_to_connections( connection = connection_stack.get() connections.append(connection) - if len(connection_stack.queue) > 0: - connection_stack.queue[-1].dependencies.append(connection) - connection_stack.queue[-1].path_hops.append(path_hop) + connection_stack.queue[-1].dependencies.append(connection) + connection_stack.queue[-1].path_hops.append(path_hop) elif prv_res_class[2] is not None and res_class[2] is not None: if prv_res_class[2] == res_class[2]: # stay in domain of a device controller, connection continues @@ -142,15 +144,9 @@ def convert_explicit_path_hops_to_connections( LOGGER.debug(' switching to different device controller, chain connections') connection = connection_stack.get() connections.append(connection) + connection_stack.queue[-1].dependencies.append(connection) - if len(connection_stack.queue) > 0: - connection_stack.queue[-1].dependencies.append(connection) - - if len(connection_stack.queue) > 0: - prv_service_type = connection_stack.queue[-1].service_type - else: - prv_service_type = None - + prv_service_type = connection_stack.queue[-1].service_type service_type = get_service_type(res_class[1], prv_service_type) connection_entry = ConnectionEntry(service_type=service_type, path_hops=[path_hop]) connection_stack.put(connection_entry) @@ -197,6 +193,13 @@ def convert_explicit_path_hops_to_connections( prv_device_uuid = device_uuid prv_res_class = 
res_class + + while len(connection_stack.queue) > 1: + LOGGER.debug(' synthetic path egress') + connection = connection_stack.get() + connections.append(connection) + connection_stack.queue[-1].dependencies.append(connection) + # path egress LOGGER.debug(' path egress') connections.append(connection_stack.get()) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index da63bfd04..6df4ea95c 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -49,21 +49,17 @@ SERVICE_TYPE_LXNM = {ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETY SERVICE_TYPE_TAPI = {ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE} def get_service_type( - device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum = None + device_type : DeviceTypeEnum, prv_service_type : ServiceTypeEnum ) -> ServiceTypeEnum: if device_type is DeviceTypeEnum.NCE: return ServiceTypeEnum.SERVICETYPE_L3NM if device_type is DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: return ServiceTypeEnum.SERVICETYPE_L3NM if ( device_type in PACKET_DEVICE_TYPES and - prv_service_type is not None and prv_service_type in SERVICE_TYPE_LXNM ): return prv_service_type if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE - if ( - device_type in NETWORK_DEVICE_TYPES and - prv_service_type is not None - ): return prv_service_type + if device_type in NETWORK_DEVICE_TYPES: return prv_service_type str_fields = ', '.join([ 'device_type={:s}'.format(str(device_type)), -- GitLab From fa494a92f972c6ee26b6889aee5e302c4c9f2ac9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 17:00:45 +0000 Subject: [PATCH 122/367] ECOC F5GA Telemetry Demo: - Add dump-logs script --- src/tests/ecoc25-f5ga-telemetry/dump-logs.sh | 21 ++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100755 src/tests/ecoc25-f5ga-telemetry/dump-logs.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh new file mode 100755 index 000000000..5fea21189 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh @@ -0,0 +1,21 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
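# Editor's note (assumption, not part of the patch): the commands below call the
# helper scripts through relative paths (./scripts/show_logs_*.sh), so the script
# appears intended to be run from the repository root, e.g.:
#   ./src/tests/ecoc25-f5ga-telemetry/dump-logs.sh
# Each command redirects one component's logs into a local *.log file.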
+ + +./scripts/show_logs_device.sh > device.log +./scripts/show_logs_service.sh > service.log +./scripts/show_logs_slice.sh > slice.log +./scripts/show_logs_pathcomp_frontend.sh > pathcomp.log +./scripts/show_logs_nbi.sh > nbi.log -- GitLab From e3b5b066dec6928e36205fbce1840b3188f6dc1e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 17:24:01 +0000 Subject: [PATCH 123/367] Service component: - Handle NCE controller - Allow hierarchy of underlay controllers --- src/service/service/task_scheduler/TaskExecutor.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index f0f9c78b4..de8dea16c 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -59,12 +59,14 @@ CONTROLLER_DEVICE_TYPES = { DeviceTypeEnum.IETF_SLICE, DeviceTypeEnum.IP_SDN_CONTROLLER, DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, + DeviceTypeEnum.NCE, DeviceTypeEnum.OPEN_LINE_SYSTEM, DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER, DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, } EXPANSION_CONTROLLER_DEVICE_TYPES = { DeviceTypeEnum.IETF_SLICE, + DeviceTypeEnum.NCE, DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER, } @@ -274,9 +276,12 @@ class TaskExecutor: controller_uuid = device.controller_id.device_uuid.uuid if len(controller_uuid) == 0: return None controller = self.get_device(DeviceId(**json_device_id(controller_uuid))) - controller_uuid = controller.device_id.device_uuid.uuid if controller is None: raise Exception('Device({:s}) not found'.format(str(controller_uuid))) - return controller + if len(controller.controller_id.device_uuid.uuid) == 0: + return controller + else: + # in case controller is an under-underlay controller + return self.get_device_controller(controller) def get_devices_from_connection( self, connection : Connection, exclude_managed_by_controller : bool = False -- GitLab From 46122e898cb153c4a7d620fb34cb7906c1a62e60 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 3 Sep 2025 17:43:36 +0000 Subject: [PATCH 124/367] Service component: - Fix Driver selection based on controllers --- .../service/task_scheduler/TaskExecutor.py | 25 +++++++++++-------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index de8dea16c..b0efe0b53 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -287,7 +287,7 @@ class TaskExecutor: self, connection : Connection, exclude_managed_by_controller : bool = False ) -> Dict[DeviceTypeEnum, Dict[str, Device]]: devices : Dict[DeviceTypeEnum, Dict[str, Device]] = dict() - controllers : Dict[DeviceTypeEnum, Dict[str, Device]] = dict() + #controllers : Dict[DeviceTypeEnum, Dict[str, Device]] = dict() for endpoint_id in connection.path_hops_endpoint_ids: device = self.get_device(endpoint_id.device_id) device_uuid = endpoint_id.device_id.device_uuid.uuid @@ -296,10 +296,11 @@ class TaskExecutor: controller = self.get_device_controller(device) if controller is None: device_type = DeviceTypeEnum._value2member_map_[device.device_type] - if device_type in CONTROLLER_DEVICE_TYPES: - controllers.setdefault(device_type, dict())[device_uuid] = device - else: - devices.setdefault(device_type, dict())[device_uuid] = device + #if device_type in CONTROLLER_DEVICE_TYPES: + # controllers.setdefault(device_type, dict())[device_uuid] = device + 
#else: + # devices.setdefault(device_type, dict())[device_uuid] = device + devices.setdefault(device_type, dict())[device_uuid] = device else: # ===== Ryu original test ======================================================================== #if not exclude_managed_by_controller: @@ -321,14 +322,16 @@ class TaskExecutor: devices.setdefault(device_type, dict())[device_uuid] = device device_type = DeviceTypeEnum._value2member_map_[controller.device_type] - controllers.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller + #controllers.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller + devices.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) - LOGGER.debug('[get_devices_from_connection] controllers = {:s}'.format(str(controllers))) - if len(devices) == 0 and len(controllers) > 0: - return controllers - else: - return devices + #LOGGER.debug('[get_devices_from_connection] controllers = {:s}'.format(str(controllers))) + #if len(devices) == 0 and len(controllers) > 0: + # return controllers + #else: + # return devices + return devices # ----- Service-related methods ------------------------------------------------------------------------------------ -- GitLab From 8ba607a1591f6f5aa7552c5aeeb4755593968afe Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 08:28:26 +0000 Subject: [PATCH 125/367] ECOC F5GA Telemetry Demo: - Corrected E2E topology descriptor --- src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json index 4cfa080af..74ddab2b1 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json @@ -7,7 +7,7 @@ ], "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", - "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], + "device_drivers": ["DEVICEDRIVER_IETF_SLICE"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, -- GitLab From d34e517a2b774c9ab221efe9b105587dc0679cac Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 08:29:15 +0000 Subject: [PATCH 126/367] Service component - L3NM IETFL3VPN: - Bug fixed on error reporting --- .../l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py index 2a412aa53..3153a7c05 100644 --- a/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py @@ -27,6 +27,7 @@ from common.proto.context_pb2 import ( Service, ServiceConfig, ) +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type from service.service.service_handler_api._ServiceHandler import _ServiceHandler @@ 
-424,8 +425,9 @@ class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): controller.device_config.config_rules.append(ConfigRule(**jcr)) self.__task_executor.configure_device(controller) except Exception as e: # pylint: disable=broad-except + str_service_id = grpc_message_to_json_string(self.__service.service_id) LOGGER.exception( - "Unable to SetEndpoint for Service({:s})".format(str(service_id)) + "Unable to SetEndpoint for Service({:s})".format(str(str_service_id)) ) results.append(e) -- GitLab From 588ce47461ed231a88c595736f34309b190d0715 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 08:30:09 +0000 Subject: [PATCH 127/367] PathComp component - FrontEnd: - Extended Sub-Service Composition to identify hierarchies of controllers and use the top-level one in the underlying hierarchy --- .../algorithms/tools/ResourceGroups.py | 29 ++++++++++++++----- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index 06b0f90e8..78e54128b 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -65,12 +65,27 @@ DEVICE_TYPE_TO_DEEPNESS = { IGNORED_DEVICE_TYPES = {DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER} REMOTEDOMAIN_DEVICE_TYPES = {DeviceTypeEnum.NETWORK} +def get_device( + device_dict : Dict[str, Tuple[Dict, Device]], device_uuid : str, + fail_if_not_found : bool = True +) -> Tuple[Dict, Device]: + device_tuple = device_dict.get(device_uuid) + if device_tuple is None and fail_if_not_found: + MSG = 'Device({:s}) not found' + raise Exception(MSG.format(str(device_uuid))) + return device_tuple + def get_device_controller_uuid( - device : Device + device : Device, device_dict : Dict[str, Tuple[Dict, Device]], + retrieve_top_level_controller : bool = True ) -> Optional[str]: - controller_uuid = device.controller_id.device_uuid.uuid - if len(controller_uuid) > 0: return controller_uuid - return None + while True: + controller_uuid = device.controller_id.device_uuid.uuid + if retrieve_top_level_controller: return controller_uuid + if len(controller_uuid) == 0: return None + controller_tuple = get_device(device_dict, controller_uuid, fail_if_not_found=False) + if controller_tuple is None: return controller_uuid + _, device = controller_tuple def _map_device_type(device : Device) -> DeviceTypeEnum: device_type = DeviceTypeEnum._value2member_map_.get(device.device_type) # pylint: disable=no-member @@ -88,15 +103,13 @@ def get_device_type( device : Device, device_dict : Dict[str, Tuple[Dict, Device]], device_controller_uuid : Optional[str] ) -> DeviceTypeEnum: if device_controller_uuid is None: return _map_device_type(device) - device_controller_tuple = device_dict.get(device_controller_uuid) - if device_controller_tuple is None: raise Exception('Device({:s}) not found'.format(str(device_controller_uuid))) - _,device = device_controller_tuple + _,device = get_device(device_dict, device_controller_uuid) return _map_device_type(device) def get_resource_classification( device : Device, device_dict : Dict[str, Tuple[Dict, Device]] ) -> Tuple[int, DeviceTypeEnum, Optional[str]]: - device_controller_uuid = get_device_controller_uuid(device) + device_controller_uuid = get_device_controller_uuid(device, device_dict) device_type = get_device_type(device, device_dict, device_controller_uuid) resource_deepness = _map_resource_to_deepness(device_type) return 
resource_deepness, device_type, device_controller_uuid -- GitLab From 81aa9b8e54e3037d8e5f0d0ab4c19d1fe5f6ce76 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 09:45:42 +0000 Subject: [PATCH 128/367] Device component - IETF L3VPN: - Code formatting and polishing - Minor bug fixes - Improved Credential check - Improved log reporting --- .../drivers/ietf_l3vpn/IetfL3VpnDriver.py | 89 +++++++++---------- .../drivers/ietf_l3vpn/TfsApiClient.py | 72 +++++++++++---- 2 files changed, 93 insertions(+), 68 deletions(-) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 3bc94e18a..049009ec3 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -24,21 +24,23 @@ from .Constants import SPECIAL_RESOURCE_MAPPINGS from .TfsApiClient import TfsApiClient from .Tools import compose_resource_endpoint + LOGGER = logging.getLogger(__name__) + ALL_RESOURCE_KEYS = [ RESOURCE_ENDPOINTS, RESOURCE_SERVICES, ] -RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r"^\/interface\[([^\]]+)\].*") -RE_IETF_L3VPN_DATA = re.compile(r"^\/service\[[^\]]+\]\/IETFL3VPN$") -RE_IETF_L3VPN_OPERATION = re.compile(r"^\/service\[[^\]]+\]\/IETFL3VPN\/operation$") +RE_IETF_L3VPN_DATA = re.compile(r'^\/service\[[^\]]+\]\/IETFL3VPN$') +RE_IETF_L3VPN_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/IETFL3VPN\/operation$') DRIVER_NAME = 'ietf_l3vpn' METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + class IetfL3VpnDriver(_Driver): def __init__(self, address : str, port : str, **settings) -> None: super().__init__(DRIVER_NAME, address, int(port), **settings) @@ -54,7 +56,6 @@ class IetfL3VpnDriver(_Driver): self.address, self.port, scheme=scheme, username=username, password=password, timeout=timeout ) - #self.__tfs_nbi_root = "{:s}://{:s}:{:d}".format(scheme, self.address, int(self.port)) # Options are: # disabled --> just import endpoints as usual @@ -90,7 +91,7 @@ class IetfL3VpnDriver(_Driver): resource_key, resource_value = resource chk_string(str_resource_name, resource_key, allow_empty=False) resource_path = resource_key.split("/") - except Exception as e: # pylint: disable=broad-except + except Exception as e: LOGGER.exception( "Exception validating {:s}: {:s}".format( str_resource_name, str(resource_key) @@ -114,14 +115,9 @@ class IetfL3VpnDriver(_Driver): def Connect(self) -> bool: with self.__lock: if self.__started.is_set(): return True - try: - self.tac.check_credentials() - except: # pylint: disable=bare-except - LOGGER.exception('Exception checking credentials') - return False - else: - self.__started.set() - return True + checked = self.tac.check_credentials(raise_if_fail=False) + if checked: self.__started.set() + return checked def Disconnect(self) -> bool: with self.__lock: @@ -179,12 +175,14 @@ class IetfL3VpnDriver(_Driver): for resource in resources: resource_key, resource_value = resource if RE_IETF_L3VPN_OPERATION.match(resource_key): - operation_type = json.loads(resource_value)["type"] + operation_type = json.loads(resource_value)['type'] results.append((resource_key, True)) break else: - raise Exception("operation type not found in resources") - for resource in resources: + raise Exception('operation type not found in resources') + + for i, resource in enumerate(resources): + str_resource_name = 'resource_key[#{:d}]'.format(i) LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource 
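Editor's aside (illustrative, not part of the patch): the resource_key values filtered in this loop follow the two patterns compiled at the top of the driver; a quick standalone check of their shape, using a hypothetical 'vpn-1' identifier:

    import re
    RE_DATA = re.compile(r'^\/service\[[^\]]+\]\/IETFL3VPN$')
    RE_OPER = re.compile(r'^\/service\[[^\]]+\]\/IETFL3VPN\/operation$')
    assert RE_DATA.match('/service[vpn-1]/IETFL3VPN') is not None
    assert RE_OPER.match('/service[vpn-1]/IETFL3VPN/operation') is not None
    assert RE_DATA.match('/service[vpn-1]/IETFL3VPN/operation') is None  # anchored by '$'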
if not RE_IETF_L3VPN_DATA.match(resource_key): @@ -192,31 +190,24 @@ class IetfL3VpnDriver(_Driver): try: resource_value = json.loads(resource_value) - # if service_exists(self.__tfs_nbi_root, self.__auth, service_uuid): - # exc = NotImplementedError( - # "IETF L3VPN Service Update is still not supported" - # ) - # results.append((resource[0], exc)) - # continue - if operation_type == "create": - service_id = resource_value["ietf-l3vpn-svc:l3vpn-svc"][ - "vpn-services" - ]["vpn-service"][0]["vpn-id"] + service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ + 'vpn-services' + ]['vpn-service'][0]['vpn-id'] + + if operation_type == 'create': self.tac.create_connectivity_service(resource_value) - elif operation_type == "update": - service_id = resource_value["ietf-l3vpn-svc:l3vpn-svc"][ - "vpn-services" - ]["vpn-service"][0]["vpn-id"] + elif operation_type == 'update': self.tac.update_connectivity_service(resource_value) + elif operation_type == 'delete': + self.tac.delete_connectivity_service(service_uuid) else: - raise Exception("operation type not supported") + MSG = 'OperationType({:s}) not supported' + raise Exception(MSG.format(str(operation_type))) + results.append((resource_key, True)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - "Unhandled error processing resource_key({:s})".format( - str(resource_key) - ) - ) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results @@ -228,24 +219,24 @@ class IetfL3VpnDriver(_Driver): if len(resources) == 0: return results with self.__lock: - for resource in resources: - LOGGER.info("resource = {:s}".format(str(resource))) + for i, resource in enumerate(resources): + str_resource_name = 'resource_key[#{:d}]'.format(i) + LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource + if not RE_IETF_L3VPN_DATA.match(resource_key): continue + try: resource_value = json.loads(resource_value) - service_id = resource_value["id"] - - # if service_exists(self.__tfs_nbi_root, self.__auth, service_uuid): - self.tac.delete_connectivity_service(service_id) + service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ + 'vpn-services' + ]['vpn-service'][0]['vpn-id'] + self.tac.delete_connectivity_service(service_uuid) results.append((resource_key, True)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - "Unhandled error processing resource_key({:s})".format( - str(resource_key) - ) - ) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index 87bb6dc1e..baf723813 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -17,10 +17,15 @@ from typing import Dict, List, Optional from common.tools.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum + GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' GET_DEVICES_URL = '/tfs-api/devices' GET_LINKS_URL = '/tfs-api/links' -L3VPN_URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services' + + +IETF_L3VPN_ALL_URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services' 
+IETF_L3VPN_ONE_URL = IETF_L3VPN_ALL_URL + '/vpn-service={:s}' + MAPPING_STATUS = { 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, @@ -50,8 +55,10 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_RYU' : 18, } + LOGGER = logging.getLogger(__name__) + class TfsApiClient(RestApiClient): def __init__( self, address : str, port : int, scheme : str = 'http', @@ -63,9 +70,26 @@ class TfsApiClient(RestApiClient): timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER ) - def check_credentials(self) -> None: - self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) - LOGGER.info('Credentials checked') + + def check_credentials(self, raise_if_fail : bool = True) -> None: + try: + LOGGER.info('Checking credentials...') + self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) + LOGGER.info('Credentials checked') + return True + except requests.exceptions.Timeout as e: + MSG = 'Timeout connecting {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + except Exception as e: + MSG = 'Exception connecting credentials: {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + def get_devices_endpoints( self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES @@ -142,31 +166,41 @@ class TfsApiClient(RestApiClient): LOGGER.debug('[get_devices_endpoints] topology; returning') return result - def create_connectivity_service(self, l3vpn_data : dict) -> None: - MSG = '[create_connectivity_service] l3vpn_data={:s}' - LOGGER.debug(MSG.format(str(l3vpn_data))) + + def create_connectivity_service(self, data : Dict) -> None: + MSG = '[create_connectivity_service] data={:s}' + LOGGER.debug(MSG.format(str(data))) try: - self.post(L3VPN_URL, body=l3vpn_data) + MSG = '[create_connectivity_service] POST {:s}: {:s}' + LOGGER.info(MSG.format(str(IETF_L3VPN_ALL_URL), str(data))) + self.post(IETF_L3VPN_ALL_URL, body=data) except requests.exceptions.ConnectionError as e: - MSG = 'Failed to send POST request to TFS L3VPN NBI' + MSG = 'Failed to send POST request to TFS IETF L3VPN NBI' raise Exception(MSG) from e - def update_connectivity_service(self, l3vpn_data : dict) -> None: - MSG = '[update_connectivity_service] l3vpn_data={:s}' - LOGGER.debug(MSG.format(str(l3vpn_data))) - vpn_id = l3vpn_data['ietf-l3vpn-svc:l3vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] + + def update_connectivity_service(self, data : Dict) -> None: + MSG = '[update_connectivity_service] data={:s}' + LOGGER.debug(MSG.format(str(data))) + vpn_id = data['ietf-l3vpn-svc:l3vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] + url = IETF_L3VPN_ONE_URL.format(vpn_id) try: - self.put(L3VPN_URL + f'/vpn-service={vpn_id}', body=l3vpn_data) + MSG = '[update_connectivity_service] PUT {:s}: {:s}' + LOGGER.info(MSG.format(str(url), str(data))) + self.put(url, body=data) except requests.exceptions.ConnectionError as e: - MSG = 'Failed to send PUT request to TFS L3VPN NBI' + MSG = 'Failed to send PUT request to TFS IETF L3VPN NBI' raise Exception(MSG) from e + def delete_connectivity_service(self, service_uuid : str) -> None: - url = L3VPN_URL + f'/vpn-service={service_uuid}' - MSG = '[delete_connectivity_service] url={:s}' - LOGGER.debug(MSG.format(str(url))) + MSG = '[delete_connectivity_service] service_uuid={:s}' + LOGGER.debug(MSG.format(str(service_uuid))) + url = IETF_L3VPN_ONE_URL.format(service_uuid) try: + MSG = 
'[delete_connectivity_service] DELETE {:s}' + LOGGER.info(MSG.format(str(url))) self.delete(url) except requests.exceptions.ConnectionError as e: - MSG = 'Failed to send DELETE request to TFS L3VPN NBI' + MSG = 'Failed to send DELETE request to TFS IETF L3VPN NBI' raise Exception(MSG) from e -- GitLab From 2944c65fde89f61ed7b942029072a077ef8e564f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 09:47:26 +0000 Subject: [PATCH 129/367] Device component - IETF Slice: - Added support for TFS-API-based topology discovery - Corrected driver selection rules - Renamed driver files to right names - Code formatting and polishing - Minor bug fixes - Improved Credential check - Improved log reporting --- src/device/service/drivers/__init__.py | 4 +- .../{driver.py => IetfSliceDriver.py} | 248 ++++++++---------- .../drivers/ietf_slice/TfsApiClient.py | 209 +++++++++++++++ .../ietf_slice/tfs_slice_nbi_client.py | 76 ------ 4 files changed, 316 insertions(+), 221 deletions(-) rename src/device/service/drivers/ietf_slice/{driver.py => IetfSliceDriver.py} (51%) create mode 100644 src/device/service/drivers/ietf_slice/TfsApiClient.py delete mode 100644 src/device/service/drivers/ietf_slice/tfs_slice_nbi_client.py diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 788b09edd..ff1dd0050 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -100,11 +100,11 @@ DRIVERS.append( } ])) -from .ietf_slice.driver import IetfSliceDriver # pylint: disable=wrong-import-position +from .ietf_slice.IetfSliceDriver import IetfSliceDriver # pylint: disable=wrong-import-position DRIVERS.append( (IetfSliceDriver, [ { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.IETF_SLICE, + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE, } ])) diff --git a/src/device/service/drivers/ietf_slice/driver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py similarity index 51% rename from src/device/service/drivers/ietf_slice/driver.py rename to src/device/service/drivers/ietf_slice/IetfSliceDriver.py index a657dc1e0..dce1a6d26 100644 --- a/src/device/service/drivers/ietf_slice/driver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -12,38 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json -import logging -import re -import threading -from typing import Any, Iterator, List, Optional, Tuple, Union - -import anytree -import requests -from requests.auth import HTTPBasicAuth +import anytree, json, logging, re, threading +from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_length, chk_string, chk_type -from device.service.driver_api._Driver import ( - RESOURCE_ENDPOINTS, - RESOURCE_SERVICES, - _Driver, -) -from device.service.driver_api.AnyTreeTools import ( - TreeNode, - dump_subtree, - get_subnode, - set_subnode_value, -) -from device.service.driver_api.ImportTopologyEnum import ( - ImportTopologyEnum, - get_import_topology, -) - +from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES +from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value +from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology from .Constants import SPECIAL_RESOURCE_MAPPINGS -from .tfs_slice_nbi_client import TfsApiClient +from .TfsApiClient import TfsApiClient from .Tools import compose_resource_endpoint + LOGGER = logging.getLogger(__name__) @@ -52,43 +33,38 @@ ALL_RESOURCE_KEYS = [ RESOURCE_SERVICES, ] -RE_IETF_SLICE_DATA = re.compile(r"^\/service\[[^\]]+\]\/IETFSlice$") -RE_IETF_SLICE_OPERATION = re.compile(r"^\/service\[[^\]]+\]\/IETFSlice\/operation$") -DRIVER_NAME = "ietf_slice" -METRICS_POOL = MetricsPool("Device", "Driver", labels={"driver": DRIVER_NAME}) +RE_IETF_SLICE_DATA = re.compile(r'^\/service\[[^\]]+\]\/IETFSlice$') +RE_IETF_SLICE_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/IETFSlice\/operation$') + +DRIVER_NAME = 'ietf_slice' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class IetfSliceDriver(_Driver): - def __init__(self, address: str, port: str, **settings) -> None: + def __init__(self, address : str, port : str, **settings) -> None: super().__init__(DRIVER_NAME, address, int(port), **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - self.__running = TreeNode(".") - scheme = self.settings.get("scheme", "http") - username = self.settings.get("username") - password = self.settings.get("password") + self.__running = TreeNode('.') + username = self.settings.get('username') + password = self.settings.get('password') + scheme = self.settings.get('scheme', 'http') + timeout = int(self.settings.get('timeout', 60)) self.tac = TfsApiClient( - self.address, - self.port, - scheme=scheme, - username=username, - password=password, - ) - self.__auth = None - # ( - # HTTPBasicAuth(username, password) - # if username is not None and password is not None - # else None - # ) - self.__tfs_nbi_root = "{:s}://{:s}:{:d}".format( - scheme, self.address, int(self.port) - ) - self.__timeout = int(self.settings.get("timeout", 120)) - self.__import_topology = get_import_topology( - self.settings, default=ImportTopologyEnum.DEVICES + self.address, self.port, scheme=scheme, username=username, + password=password, timeout=timeout ) + + # Options are: + # disabled --> just import endpoints as usual + # devices --> imports sub-devices but not links connecting them. + # (a remotely-controlled transport domain might exist between them) + # topology --> imports sub-devices and links connecting them. 
+ # (not supported by XR driver) + self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DEVICES) + endpoints = self.settings.get("endpoints", []) endpoint_resources = [] for endpoint in endpoints: @@ -115,7 +91,7 @@ class IetfSliceDriver(_Driver): resource_key, resource_value = resource chk_string(str_resource_name, resource_key, allow_empty=False) resource_path = resource_key.split("/") - except Exception as e: # pylint: disable=broad-except + except Exception as e: LOGGER.exception( "Exception validating {:s}: {:s}".format( str_resource_name, str(resource_key) @@ -137,22 +113,11 @@ class IetfSliceDriver(_Driver): return results def Connect(self) -> bool: - url = self.__tfs_nbi_root + "/restconf/data/ietf-network-slice-service:ietf-nss" with self.__lock: - if self.__started.is_set(): - return True - try: - # requests.get(url, timeout=self.__timeout) - ... - except requests.exceptions.Timeout: - LOGGER.exception("Timeout connecting {:s}".format(url)) - return False - except Exception: # pylint: disable=broad-except - LOGGER.exception("Exception connecting {:s}".format(url)) - return False - else: - self.__started.set() - return True + if self.__started.is_set(): return True + checked = self.tac.check_credentials(raise_if_fail=False) + if checked: self.__started.set() + return checked def Disconnect(self) -> bool: with self.__lock: @@ -166,141 +131,138 @@ class IetfSliceDriver(_Driver): @metered_subclass_method(METRICS_POOL) def GetConfig( - self, resource_keys: List[str] = [] + self, resource_keys : List[str] = [] ) -> List[Tuple[str, Union[Any, None, Exception]]]: - chk_type("resources", resource_keys, list) + chk_type('resources', resource_keys, list) + results = [] with self.__lock: + self.tac.check_credentials() if len(resource_keys) == 0: return dump_subtree(self.__running) - results = [] - resolver = anytree.Resolver(pathattr="name") + resolver = anytree.Resolver(pathattr='name') for i, resource_key in enumerate(resource_keys): - str_resource_name = "resource_key[#{:d}]".format(i) + str_resource_name = 'resource_key[#{:d}]'.format(i) try: chk_string(str_resource_name, resource_key, allow_empty=False) - resource_key = SPECIAL_RESOURCE_MAPPINGS.get( - resource_key, resource_key - ) - resource_path = resource_key.split("/") - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - "Exception validating {:s}: {:s}".format( - str_resource_name, str(resource_key) + if resource_key == RESOURCE_ENDPOINTS: + # return endpoints through TFS NBI API and list-devices method + results.extend(self.tac.get_devices_endpoints(self.__import_topology)) + else: + resource_key = SPECIAL_RESOURCE_MAPPINGS.get( + resource_key, resource_key ) - ) - results.append( - (resource_key, e) - ) # if validation fails, store the exception - continue - resource_node = get_subnode( - resolver, self.__running, resource_path, default=None - ) - # if not found, resource_node is None - if resource_node is None: - continue - results.extend(dump_subtree(resource_node)) - return results + resource_path = resource_key.split('/') + resource_node = get_subnode( + resolver, self.__running, resource_path, default=None + ) + # if not found, resource_node is None + if resource_node is None: continue + results.extend(dump_subtree(resource_node)) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) + return results 
@metered_subclass_method(METRICS_POOL) def SetConfig( - self, resources: List[Tuple[str, Any]] + self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: results = [] - - if len(resources) == 0: - return results - + if len(resources) == 0: return results with self.__lock: for resource in resources: resource_key, resource_value = resource if RE_IETF_SLICE_OPERATION.match(resource_key): - operation_type = json.loads(resource_value)["type"] + operation_type = json.loads(resource_value)['type'] results.append((resource_key, True)) break else: - raise Exception("operation type not found in resources") - for resource in resources: - LOGGER.info("resource = {:s}".format(str(resource))) + raise Exception('operation type not found in resources') + + for i, resource in enumerate(resources): + str_resource_name = 'resource_key[#{:d}]'.format(i) + LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_IETF_SLICE_DATA.match(resource_key): continue try: resource_value = json.loads(resource_value) - slice_name = resource_value["network-slice-services"][ - "slice-service" - ][0]["id"] + slice_data = resource_value['network-slice-services'][ + 'slice-service' + ][0] + slice_name = slice_data['id'] - if operation_type == "create": + if operation_type == 'create': self.tac.create_slice(resource_value) - - elif operation_type == "update": - connection_groups = resource_value["network-slice-services"][ - "slice-service" - ][0]["connection-groups"]["connection-group"] - + elif operation_type == 'update': + connection_groups = slice_data['connection-groups']['connection-group'] if len(connection_groups) != 1: - raise Exception("only one connection group is supported") - + MSG = 'Exactly one ConnectionGroup({:s}) is supported' + raise Exception(MSG.format(str(connection_groups))) connection_group = connection_groups[0] - self.tac.update_slice( - slice_name, connection_group["id"], connection_group + slice_name, connection_group['id'], connection_group ) - - elif operation_type == "delete": + elif operation_type == 'delete': self.tac.delete_slice(slice_name) + else: + MSG = 'OperationType({:s}) not supported' + raise Exception(MSG.format(str(operation_type))) results.append((resource_key, True)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - "Unhandled error processing resource_key({:s})".format( - str(resource_key) - ) - ) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results @metered_subclass_method(METRICS_POOL) def DeleteConfig( - self, resources: List[Tuple[str, Any]] + self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: results = [] - if len(resources) == 0: return results - with self.__lock: - for resource in resources: - LOGGER.info("resource = {:s}".format(str(resource))) + for i, resource in enumerate(resources): + str_resource_name = 'resource_key[#{:d}]'.format(i) + LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource + + if not RE_IETF_SLICE_DATA.match(resource_key): + continue + try: + resource_value = json.loads(resource_value) + slice_name = resource_value['network-slice-services'][ + 'slice-service' + ][0]['id'] + self.tac.delete_slice(slice_name) results.append((resource_key, True)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - "Unhandled error processing 
resource_key({:s})".format( - str(resource_key) - ) - ) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results @metered_subclass_method(METRICS_POOL) def SubscribeState( - self, subscriptions: List[Tuple[str, float, float]] + self, subscriptions : List[Tuple[str, float, float]] ) -> List[Union[bool, Exception]]: - # TODO: IETF Slice does not support monitoring by now + # TODO: does not support monitoring by now return [False for _ in subscriptions] @metered_subclass_method(METRICS_POOL) def UnsubscribeState( - self, subscriptions: List[Tuple[str, float, float]] + self, subscriptions : List[Tuple[str, float, float]] ) -> List[Union[bool, Exception]]: - # TODO: IETF Slice does not support monitoring by now + # TODO: does not support monitoring by now return [False for _ in subscriptions] def GetState( - self, blocking=False, terminate: Optional[threading.Event] = None + self, blocking=False, terminate : Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: - # TODO: IETF Slice does not support monitoring by now + # TODO: does not support monitoring by now return [] diff --git a/src/device/service/drivers/ietf_slice/TfsApiClient.py b/src/device/service/drivers/ietf_slice/TfsApiClient.py new file mode 100644 index 000000000..13626ef69 --- /dev/null +++ b/src/device/service/drivers/ietf_slice/TfsApiClient.py @@ -0,0 +1,209 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
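Editor's aside (illustrative, not part of the patch): the URL templates defined below in this new TfsApiClient are expanded with str.format(); for a hypothetical slice 'slice-1' and connection group 'cg-1' they resolve as follows:

    IETF_SLICE_ALL_URL = '/restconf/data/ietf-network-slice-service:network-slice-services'
    IETF_SLICE_ONE_URL = IETF_SLICE_ALL_URL + '/slice-service={:s}'
    IETF_SLICE_CG_URL  = IETF_SLICE_ONE_URL + '/connection-groups/connection-group={:s}'

    IETF_SLICE_ONE_URL.format('slice-1')
    # -> '.../network-slice-services/slice-service=slice-1'
    IETF_SLICE_CG_URL.format('slice-1', 'cg-1')
    # -> '.../slice-service=slice-1/connection-groups/connection-group=cg-1'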
+ +import logging, requests +from typing import Dict, List, Optional +from common.tools.client.RestApiClient import RestApiClient +from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum + + +GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' +GET_DEVICES_URL = '/tfs-api/devices' +GET_LINKS_URL = '/tfs-api/links' + + +IETF_SLICE_ALL_URL = '/restconf/data/ietf-network-slice-service:network-slice-services' +IETF_SLICE_ONE_URL = IETF_SLICE_ALL_URL + '/slice-service={:s}' +IETF_SLICE_CG_URL = IETF_SLICE_ONE_URL + '/connection-groups/connection-group={:s}' + + +MAPPING_STATUS = { + 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, + 'DEVICEOPERATIONALSTATUS_DISABLED' : 1, + 'DEVICEOPERATIONALSTATUS_ENABLED' : 2, +} + +MAPPING_DRIVER = { + 'DEVICEDRIVER_UNDEFINED' : 0, + 'DEVICEDRIVER_OPENCONFIG' : 1, + 'DEVICEDRIVER_TRANSPORT_API' : 2, + 'DEVICEDRIVER_P4' : 3, + 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, + 'DEVICEDRIVER_ONF_TR_532' : 5, + 'DEVICEDRIVER_XR' : 6, + 'DEVICEDRIVER_IETF_L2VPN' : 7, + 'DEVICEDRIVER_GNMI_OPENCONFIG' : 8, + 'DEVICEDRIVER_OPTICAL_TFS' : 9, + 'DEVICEDRIVER_IETF_ACTN' : 10, + 'DEVICEDRIVER_OC' : 11, + 'DEVICEDRIVER_QKD' : 12, + 'DEVICEDRIVER_IETF_L3VPN' : 13, + 'DEVICEDRIVER_IETF_SLICE' : 14, + 'DEVICEDRIVER_NCE' : 15, + 'DEVICEDRIVER_SMARTNIC' : 16, + 'DEVICEDRIVER_MORPHEUS' : 17, + 'DEVICEDRIVER_RYU' : 18, +} + + +LOGGER = logging.getLogger(__name__) + + +class TfsApiClient(RestApiClient): + def __init__( + self, address : str, port : int, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : Optional[int] = 30 + ) -> None: + super().__init__( + address, port, scheme=scheme, username=username, password=password, + timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER + ) + + + def check_credentials(self, raise_if_fail : bool = True) -> None: + try: + LOGGER.info('Checking credentials...') + self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) + LOGGER.info('Credentials checked') + return True + except requests.exceptions.Timeout as e: + MSG = 'Timeout connecting {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + except Exception as e: + MSG = 'Exception connecting credentials: {:s}' + msg = MSG.format(GET_CONTEXT_IDS_URL) + LOGGER.exception(msg) + if raise_if_fail: raise Exception(msg) from e + return False + + + def get_devices_endpoints( + self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES + ) -> List[Dict]: + LOGGER.debug('[get_devices_endpoints] begin') + MSG = '[get_devices_endpoints] import_topology={:s}' + LOGGER.debug(MSG.format(str(import_topology))) + + if import_topology == ImportTopologyEnum.DISABLED: + MSG = 'Unsupported import_topology mode: {:s}' + raise Exception(MSG.format(str(import_topology))) + + devices = self.get(GET_DEVICES_URL, expected_status_codes={requests.codes['OK']}) + + result = list() + for json_device in devices['devices']: + device_uuid : str = json_device['device_id']['device_uuid']['uuid'] + device_type : str = json_device['device_type'] + #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type + device_status = json_device['device_operational_status'] + + ctrl_id : Dict[str, Dict] = json_device.get('controller_id', dict()) + ctrl_uuid : Optional[str] = ctrl_id.get('device_uuid', dict()).get('uuid') + + device_url = '/devices/device[{:s}]'.format(device_uuid) + device_data = { + 'uuid': 
json_device['device_id']['device_uuid']['uuid'], + 'name': json_device['name'], + 'type': device_type, + 'status': MAPPING_STATUS[device_status], + 'drivers': [ + MAPPING_DRIVER[driver] + for driver in json_device['device_drivers'] + ], + } + if ctrl_uuid is not None and len(ctrl_uuid) > 0: + device_data['ctrl_uuid'] = ctrl_uuid + result.append((device_url, device_data)) + + for json_endpoint in json_device['device_endpoints']: + endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) + endpoint_data = { + 'device_uuid': device_uuid, + 'uuid': endpoint_uuid, + 'name': json_endpoint['name'], + 'type': json_endpoint['endpoint_type'], + } + result.append((endpoint_url, endpoint_data)) + + if import_topology == ImportTopologyEnum.DEVICES: + LOGGER.debug('[get_devices_endpoints] devices only; returning') + return result + + links = self.get(GET_LINKS_URL, expected_status_codes={requests.codes['OK']}) + + for json_link in links['links']: + link_uuid : str = json_link['link_id']['link_uuid']['uuid'] + link_url = '/links/link[{:s}]'.format(link_uuid) + link_endpoint_ids = [ + ( + json_endpoint_id['device_id']['device_uuid']['uuid'], + json_endpoint_id['endpoint_uuid']['uuid'], + ) + for json_endpoint_id in json_link['link_endpoint_ids'] + ] + link_data = { + 'uuid': json_link['link_id']['link_uuid']['uuid'], + 'name': json_link['name'], + 'endpoints': link_endpoint_ids, + } + result.append((link_url, link_data)) + + LOGGER.debug('[get_devices_endpoints] topology; returning') + return result + + + def create_slice(self, data : Dict) -> None: + MSG = '[create_slice] data={:s}' + LOGGER.debug(MSG.format(str(data))) + try: + MSG = '[create_slice] POST {:s}: {:s}' + LOGGER.info(MSG.format(str(IETF_SLICE_ALL_URL), str(data))) + self.post(IETF_SLICE_ALL_URL, body=data) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send POST request to TFS IETF Slice NBI' + raise Exception(MSG) from e + + + def update_slice( + self, slice_name : str, connection_group_id : str, + updated_connection_group_data : Dict + ) -> None: + MSG = '[update_slice] slice_name={:s} connection_group_id={:s} updated_connection_group_data={:s}' + LOGGER.debug(MSG.format(str(slice_name), str(connection_group_id), str(updated_connection_group_data))) + url = IETF_SLICE_CG_URL.format(slice_name, connection_group_id) + try: + MSG = '[update_slice] PUT {:s}: {:s}' + LOGGER.info(MSG.format(str(url), str(updated_connection_group_data))) + self.put(url, body=updated_connection_group_data) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send PUT request to TFS IETF Slice NBI' + raise Exception(MSG) from e + + + def delete_slice(self, slice_name : str) -> None: + MSG = '[delete_slice] slice_name={:s}' + LOGGER.debug(MSG.format(str(slice_name))) + url = IETF_SLICE_ONE_URL.format(slice_name) + try: + MSG = '[delete_slice] DELETE {:s}' + LOGGER.info(MSG.format(str(url))) + self.delete(url) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send DELETE request to TFS IETF Slice NBI' + raise Exception(MSG) from e diff --git a/src/device/service/drivers/ietf_slice/tfs_slice_nbi_client.py b/src/device/service/drivers/ietf_slice/tfs_slice_nbi_client.py deleted file mode 100644 index d8869073e..000000000 --- a/src/device/service/drivers/ietf_slice/tfs_slice_nbi_client.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging -from typing import Optional - -import requests -from requests.auth import HTTPBasicAuth - -IETF_SLICE_URL = "{:s}://{:s}:{:d}/restconf/data/ietf-network-slice-service" -TIMEOUT = 30 - -LOGGER = logging.getLogger(__name__) - -HEADERS = {"Content-Type": "application/json"} - - -class TfsApiClient: - def __init__( - self, - address: str, - port: int, - scheme: str = "http", - username: Optional[str] = None, - password: Optional[str] = None, - ) -> None: - self._slice_url = IETF_SLICE_URL.format(scheme, address, port) - self._auth = None - # ( - # HTTPBasicAuth(username, password) - # if username is not None and password is not None - # else None - # ) - - def create_slice(self, slice_data: dict) -> None: - url = self._slice_url + ":network-slice-services" - try: - requests.post(url, json=slice_data, headers=HEADERS) - LOGGER.info(f"IETF Slice Post to {url}: {slice_data}") - except requests.exceptions.ConnectionError: - raise Exception("faild to send post request to TFS IETF Slice NBI") - - def update_slice( - self, - slice_name: str, - connection_group_id: str, - updated_connection_group_data: dict, - ) -> None: - url = ( - self._slice_url - + f":network-slice-services/slice-service={slice_name}/connection-groups/connection-group={connection_group_id}" - ) - try: - requests.put(url, json=updated_connection_group_data, headers=HEADERS) - LOGGER.info(f"IETF Slice Put to {url}: {updated_connection_group_data}") - except requests.exceptions.ConnectionError: - raise Exception("faild to send update request to TFS IETF Slice NBI") - - def delete_slice(self, slice_name: str) -> None: - url = self._slice_url + f":network-slice-services/slice-service={slice_name}" - try: - requests.delete(url) - LOGGER.info(f"IETF Slice Delete to {url}") - except requests.exceptions.ConnectionError: - raise Exception("faild to send delete request to TFS IETF Slice NBI") -- GitLab From 603428ecbbe0882e19132c11937211973b7a14ef Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 11:36:18 +0000 Subject: [PATCH 130/367] PathComp component - FrontEnd: - Fixed Sub-Service Composition to identify hierarchies of controllers and use the top-level one in the underlying hierarchy - Code polishing --- .../algorithms/tools/ResourceGroups.py | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index 78e54128b..cbc0ecf88 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -13,12 +13,12 @@ # limitations under the License. 
-import json from typing import Dict, Optional, Tuple from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import Device from common.tools.grpc.Tools import grpc_message_to_json_string + DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.EMULATED_DATACENTER.value : 90, DeviceTypeEnum.DATACENTER.value : 90, @@ -62,9 +62,11 @@ DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.NETWORK.value : 0, # network out of our control; always delegate } + IGNORED_DEVICE_TYPES = {DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER} REMOTEDOMAIN_DEVICE_TYPES = {DeviceTypeEnum.NETWORK} + def get_device( device_dict : Dict[str, Tuple[Dict, Device]], device_uuid : str, fail_if_not_found : bool = True @@ -75,18 +77,22 @@ def get_device( raise Exception(MSG.format(str(device_uuid))) return device_tuple + def get_device_controller_uuid( - device : Device, device_dict : Dict[str, Tuple[Dict, Device]], - retrieve_top_level_controller : bool = True + device : Device, device_dict : Dict[str, Tuple[Dict, Device]] ) -> Optional[str]: + last_controller_uuid = None while True: controller_uuid = device.controller_id.device_uuid.uuid - if retrieve_top_level_controller: return controller_uuid - if len(controller_uuid) == 0: return None + if len(controller_uuid) == 0: return last_controller_uuid controller_tuple = get_device(device_dict, controller_uuid, fail_if_not_found=False) - if controller_tuple is None: return controller_uuid + if controller_tuple is None: + MSG = 'Unable to find referenced Controller({:s})' + raise Exception(MSG.format(str(controller_uuid))) + last_controller_uuid = controller_uuid _, device = controller_tuple + def _map_device_type(device : Device) -> DeviceTypeEnum: device_type = DeviceTypeEnum._value2member_map_.get(device.device_type) # pylint: disable=no-member if device_type is None: @@ -94,11 +100,13 @@ def _map_device_type(device : Device) -> DeviceTypeEnum: raise Exception(MSG.format(str(device.device_type), grpc_message_to_json_string(device))) return device_type + def _map_resource_to_deepness(device_type : DeviceTypeEnum) -> int: deepness = DEVICE_TYPE_TO_DEEPNESS.get(device_type.value) if deepness is None: raise Exception('Unsupported DeviceType({:s})'.format(str(device_type.value))) return deepness + def get_device_type( device : Device, device_dict : Dict[str, Tuple[Dict, Device]], device_controller_uuid : Optional[str] ) -> DeviceTypeEnum: @@ -106,6 +114,7 @@ def get_device_type( _,device = get_device(device_dict, device_controller_uuid) return _map_device_type(device) + def get_resource_classification( device : Device, device_dict : Dict[str, Tuple[Dict, Device]] ) -> Tuple[int, DeviceTypeEnum, Optional[str]]: -- GitLab From 4f54cdb9f33fb5361f6bf34c702e4cbe1fb5b536 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 12:26:01 +0000 Subject: [PATCH 131/367] Service component: - Upgraded service handler selection logic to consider controller device type and controller drivers --- .../ServiceHandlerFactory.py | 10 ++- .../service/task_scheduler/TaskExecutor.py | 65 +++++++++++++++---- 2 files changed, 58 insertions(+), 17 deletions(-) diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py index a5b3bed2a..96ab0ae06 100644 --- a/src/service/service/service_handler_api/ServiceHandlerFactory.py +++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py @@ -100,9 +100,6 @@ class ServiceHandlerFactory: candidate_service_handler_classes.items(), key=operator.itemgetter(1), 
reverse=True) return candidate_service_handler_classes[0][0] -def get_device_supported_drivers(device : Device) -> Set[int]: - return {device_driver for device_driver in device.device_drivers} - def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]: common_device_drivers = None for device_drivers in drivers_per_device: @@ -114,15 +111,16 @@ def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]: return common_device_drivers def get_service_handler_class( - service_handler_factory : ServiceHandlerFactory, service : Service, connection_devices : Dict[str, Device] + service_handler_factory : ServiceHandlerFactory, service : Service, + device_and_drivers: Dict[str, Tuple[Device, Set[int]]] ) -> Optional['_ServiceHandler']: str_service_key = grpc_message_to_json_string(service.service_id) # Assume all devices involved in the service's connection must support at least one driver in common common_device_drivers = get_common_device_drivers([ - get_device_supported_drivers(device) - for device in connection_devices.values() + device_drivers + for _,device_drivers in device_and_drivers.values() ]) filter_fields = { diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index b0efe0b53..51b3cf00b 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -14,7 +14,7 @@ import json, logging from enum import Enum -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple, Union from common.DeviceTypes import DeviceTypeEnum from common.method_wrappers.ServiceExceptions import NotFoundException from common.proto.qkd_app_pb2 import QKDAppStatusEnum @@ -333,6 +333,38 @@ class TaskExecutor: # return devices return devices + + def get_device_type_drivers_for_connection( + self, connection : Connection + ) -> Dict[DeviceTypeEnum, Dict[str, Tuple[Device, Set[int]]]]: + + devices : Dict[DeviceTypeEnum, Dict[str, Tuple[Device, Set[int]]]] = dict() + + for endpoint_id in connection.path_hops_endpoint_ids: + device = self.get_device(endpoint_id.device_id) + device_uuid = endpoint_id.device_id.device_uuid.uuid + if device is None: raise Exception('Device({:s}) not found'.format(str(device_uuid))) + + controller = self.get_device_controller(device) + if controller is None: + device_type = DeviceTypeEnum._value2member_map_[device.device_type] + device_drivers = set(driver for driver in device.device_drivers) + devices.setdefault(device_type, dict())[device_uuid] = (device, device_drivers) + else: + # Controller device types for those underlying path is needed by service handler + device_type = DeviceTypeEnum._value2member_map_[controller.device_type] + controller_drivers = set(driver for driver in controller.device_drivers) + + if device_type not in EXPANSION_CONTROLLER_DEVICE_TYPES: + devices.setdefault(device_type, dict())[device_uuid] = (device, controller_drivers) + else: + controller_uuid = controller.device_id.device_uuid.uuid + devices.setdefault(device_type, dict())[controller_uuid] = (controller, controller_drivers) + + LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) + return devices + + # ----- Service-related methods ------------------------------------------------------------------------------------ def get_service(self, service_id : ServiceId) -> Service: @@ -374,18 +406,22 @@ class TaskExecutor: 
#LOGGER.debug('connection_device_types_included = {:s}'.format(str(connection_device_types_included))) # ================================================================================================ - connection_device_types : Dict[DeviceTypeEnum, Dict[str, Device]] = self.get_devices_from_connection( - connection, exclude_managed_by_controller=False - ) + device_type_to_device_and_drivers : Dict[DeviceTypeEnum, Dict[str, Tuple[Device, Set[int]]]] = \ + self.get_device_type_drivers_for_connection(connection) + service_handlers : Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]] = dict() # ===== Ryu original test ======================================================================== #for device_type, connection_devices in connection_device_types_excluded.items(): # ================================================================================================ - for device_type, connection_devices in connection_device_types.items(): + for device_type, device_and_drivers in device_type_to_device_and_drivers.items(): try: service_handler_class = get_service_handler_class( - self._service_handler_factory, service, connection_devices + self._service_handler_factory, service, device_and_drivers ) + connection_devices = { + device_uuid : device + for device_uuid, (device, _) in device_and_drivers.items() + } # ===== Ryu original test ======================================================================== #LOGGER.debug('service_handler_class IN CONNECTION DEVICE TYPE EXCLUDED = {:s}'.format(str(service_handler_class.__name__))) #service_handler = service_handler_class(service, self, **service_handler_settings) @@ -402,11 +438,18 @@ class TaskExecutor: UnsupportedFilterFieldValueException ): dict_connection_devices = { - cd_data.name : (cd_uuid, cd_data.name, { - (device_driver, DeviceDriverEnum.Name(device_driver)) - for device_driver in cd_data.device_drivers - }) - for cd_uuid,cd_data in connection_devices.items() + cd_data.name : ( + cd_uuid, + cd_data.name, + { + (device_driver, DeviceDriverEnum.Name(device_driver)) + for device_driver in cd_data.device_drivers + }, { + (device_driver, DeviceDriverEnum.Name(device_driver)) + for device_driver in drivers + } + ) + for cd_uuid,(cd_data, drivers) in device_and_drivers.items() } MSG = 'Unable to select service handler. service={:s} connection={:s} connection_devices={:s}' LOGGER.exception(MSG.format( -- GitLab From 34a0bd743ce1c1fcea8832692a363b964917f2fd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 12:48:35 +0000 Subject: [PATCH 132/367] PathComp component - FrontEnd: - Fixed Sub-Service Config Rule inference --- src/pathcomp/frontend/service/algorithms/_Algorithm.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index a5bfe1352..a6dc5cb1d 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -176,7 +176,9 @@ class _Algorithm: service.service_id.context_id.context_uuid.uuid = context_uuid service.service_id.service_uuid.uuid = service_uuid service.service_type = service_type - rules_nb = len(config_rules) + + #rules_nb = len(config_rules) + rules_nb = 0 # NOTE: do we need to skip default rules if there are other rules? 
if service_type == ServiceTypeEnum.SERVICETYPE_L2NM and rules_nb == 0: compose_l2nm_config_rules(config_rules, service.service_config.config_rules) -- GitLab From f5c769195a6f16a10fad9974ac43aef97811f35d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 12:50:29 +0000 Subject: [PATCH 133/367] Service component - Service handlers IETF ACTN and IETF Slice: - Renamed service handlers to align their naming conventions --- src/service/service/service_handlers/__init__.py | 12 ++++++------ .../{l3nm_ietf_actn => l3nm_ietfactn}/Constants.py | 0 .../L3NM_IETFACTN_ServiceHandler.py} | 2 +- .../{l3nm_ietf_actn => l3nm_ietfactn}/__init__.py | 0 .../ConfigRules.py | 0 .../L3NM_IETFSlice_ServiceHandler.py} | 2 +- .../__init__.py | 0 7 files changed, 8 insertions(+), 8 deletions(-) rename src/service/service/service_handlers/{l3nm_ietf_actn => l3nm_ietfactn}/Constants.py (100%) rename src/service/service/service_handlers/{l3nm_ietf_actn/L3NMIetfActnServiceHandler.py => l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py} (99%) rename src/service/service/service_handlers/{l3nm_ietf_actn => l3nm_ietfactn}/__init__.py (100%) rename src/service/service/service_handlers/{l3slice_ietfslice => l3nm_ietfslice}/ConfigRules.py (100%) rename src/service/service/service_handlers/{l3slice_ietfslice/L3SliceIETFSliceServiceHandler.py => l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py} (99%) rename src/service/service/service_handlers/{l3slice_ietfslice => l3nm_ietfslice}/__init__.py (100%) diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 1aba88e30..3ce01861f 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -16,14 +16,14 @@ from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum from ..service_handler_api.FilterFields import FilterFieldEnum from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler from .l2nm_ietfl2vpn.L2NM_IETFL2VPN_ServiceHandler import L2NM_IETFL2VPN_ServiceHandler -from .l3nm_ietfl3vpn.L3NM_IETFL3VPN_ServiceHandler import L3NM_IETFL3VPN_ServiceHandler from .l2nm_openconfig.L2NMOpenConfigServiceHandler import L2NMOpenConfigServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler -from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler from .l3nm_gnmi_openconfig.L3NMGnmiOpenConfigServiceHandler import L3NMGnmiOpenConfigServiceHandler -from .l3nm_ietf_actn.L3NMIetfActnServiceHandler import L3NMIetfActnServiceHandler +from .l3nm_ietfactn.L3NM_IETFACTN_ServiceHandler import L3NM_IETFACTN_ServiceHandler +from .l3nm_ietfl3vpn.L3NM_IETFL3VPN_ServiceHandler import L3NM_IETFL3VPN_ServiceHandler +from .l3nm_ietfslice.L3NM_IETFSlice_ServiceHandler import L3NM_IETFSlice_ServiceHandler from .l3nm_nce.L3NMNCEServiceHandler import L3NMNCEServiceHandler -from .l3slice_ietfslice.L3SliceIETFSliceServiceHandler import L3NMSliceIETFSliceServiceHandler +from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler from .p4_dummy_l1.p4_dummy_l1_service_handler import P4DummyL1ServiceHandler from .p4_fabric_tna_int.p4_fabric_tna_int_service_handler import P4FabricINTServiceHandler @@ -68,7 +68,7 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, } ]), - (L3NMIetfActnServiceHandler, [ + (L3NM_IETFACTN_ServiceHandler, [ { 
FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN, @@ -86,7 +86,7 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_NCE, } ]), - (L3NMSliceIETFSliceServiceHandler, [ + (L3NM_IETFSlice_ServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE, diff --git a/src/service/service/service_handlers/l3nm_ietf_actn/Constants.py b/src/service/service/service_handlers/l3nm_ietfactn/Constants.py similarity index 100% rename from src/service/service/service_handlers/l3nm_ietf_actn/Constants.py rename to src/service/service/service_handlers/l3nm_ietfactn/Constants.py diff --git a/src/service/service/service_handlers/l3nm_ietf_actn/L3NMIetfActnServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py similarity index 99% rename from src/service/service/service_handlers/l3nm_ietf_actn/L3NMIetfActnServiceHandler.py rename to src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py index 6129d07de..1f4eaccb0 100644 --- a/src/service/service/service_handlers/l3nm_ietf_actn/L3NMIetfActnServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py @@ -30,7 +30,7 @@ LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l3nm_ietf_actn'}) -class L3NMIetfActnServiceHandler(_ServiceHandler): +class L3NM_IETFACTN_ServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings ) -> None: diff --git a/src/service/service/service_handlers/l3nm_ietf_actn/__init__.py b/src/service/service/service_handlers/l3nm_ietfactn/__init__.py similarity index 100% rename from src/service/service/service_handlers/l3nm_ietf_actn/__init__.py rename to src/service/service/service_handlers/l3nm_ietfactn/__init__.py diff --git a/src/service/service/service_handlers/l3slice_ietfslice/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfslice/ConfigRules.py similarity index 100% rename from src/service/service/service_handlers/l3slice_ietfslice/ConfigRules.py rename to src/service/service/service_handlers/l3nm_ietfslice/ConfigRules.py diff --git a/src/service/service/service_handlers/l3slice_ietfslice/L3SliceIETFSliceServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py similarity index 99% rename from src/service/service/service_handlers/l3slice_ietfslice/L3SliceIETFSliceServiceHandler.py rename to src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index 0df8b56e3..161b520e2 100644 --- a/src/service/service/service_handlers/l3slice_ietfslice/L3SliceIETFSliceServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -380,7 +380,7 @@ def _parse_item_added(diff: Dict) -> dict: return added_items -class L3NMSliceIETFSliceServiceHandler(_ServiceHandler): +class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service: Service, task_executor: TaskExecutor, **settings ) -> None: diff --git a/src/service/service/service_handlers/l3slice_ietfslice/__init__.py b/src/service/service/service_handlers/l3nm_ietfslice/__init__.py similarity index 100% rename from 
src/service/service/service_handlers/l3slice_ietfslice/__init__.py rename to src/service/service/service_handlers/l3nm_ietfslice/__init__.py -- GitLab From 9b99222aaf571412ee19687fa673ddce175bb5e6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 15:19:10 +0000 Subject: [PATCH 134/367] ECOC F5GA Telemetry Demo: - Corrected E2E/Agg topology descriptor --- .../ecoc25-f5ga-telemetry/data/topology/topology-agg.json | 4 ++-- .../ecoc25-f5ga-telemetry/data/topology/topology-e2e.json | 8 +++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index b7a882239..1bdcc2643 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -26,7 +26,7 @@ "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "OLT"}}, "device_type": "emu-packet-router", + {"device_id": {"device_uuid": {"uuid": "OLT-T"}}, "device_type": "emu-packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.58.10"}}, @@ -65,7 +65,7 @@ "links": [ {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "OLT-T"}}, "endpoint_uuid": {"uuid": "501"}}, {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} ]}, {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : "LINKTYPE_COPPER", diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json index 74ddab2b1..ec9fb580d 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json @@ -27,5 +27,11 @@ }}} ]}} ], - "links": [] + "links": [ + {"link_id": {"link_uuid": {"uuid": "OLT-INT"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT-T"}}, "endpoint_uuid": {"uuid": "lo"}}, + {"device_id": {"device_uuid": {"uuid": "OLT-A"}}, "endpoint_uuid": {"uuid": "lo"}} + ]} + ] } -- GitLab From e9c693552390b67cfa56749674c230fafc6b4933 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 15:19:32 +0000 Subject: [PATCH 135/367] Tests - Tools - Mock NCE-FAN Controller - Corrected startup config --- src/tests/tools/mock_nce_fan_ctrl/startup.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/tests/tools/mock_nce_fan_ctrl/startup.json b/src/tests/tools/mock_nce_fan_ctrl/startup.json index 5c70a2da7..ba6ec6f46 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/startup.json +++ b/src/tests/tools/mock_nce_fan_ctrl/startup.json @@ -29,8 +29,8 @@ ] }, { - "node-id": "OLT", "ietf-te-topology:te-node-id": "172.16.58.10", - "ietf-te-topology:te": {"te-node-attributes": {"name": "OLT", "admin-status": "up"}, "oper-status": "up"}, + "node-id": "OLT-A", "ietf-te-topology:te-node-id": "172.16.58.10", + "ietf-te-topology:te": 
{"te-node-attributes": {"name": "OLT-A", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, @@ -40,8 +40,8 @@ } ], "ietf-network-topology:link": [ - {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT-A", "dest-tp": "200"}}, - {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT-A", "dest-tp": "201"}} + {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "200"}}, + {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "201"}} ] } ] -- GitLab From 309dac54502dc03050df71a369c3d25c931b19fb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 15:51:19 +0000 Subject: [PATCH 136/367] Tests - Tools - Mock NCE-FAN Controller - Corrected startup config --- src/tests/tools/mock_nce_fan_ctrl/startup.json | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/tests/tools/mock_nce_fan_ctrl/startup.json b/src/tests/tools/mock_nce_fan_ctrl/startup.json index ba6ec6f46..5c70a2da7 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/startup.json +++ b/src/tests/tools/mock_nce_fan_ctrl/startup.json @@ -29,8 +29,8 @@ ] }, { - "node-id": "OLT-A", "ietf-te-topology:te-node-id": "172.16.58.10", - "ietf-te-topology:te": {"te-node-attributes": {"name": "OLT-A", "admin-status": "up"}, "oper-status": "up"}, + "node-id": "OLT", "ietf-te-topology:te-node-id": "172.16.58.10", + "ietf-te-topology:te": {"te-node-attributes": {"name": "OLT", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, @@ -40,8 +40,8 @@ } ], "ietf-network-topology:link": [ - {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT-A", "dest-tp": "200"}}, - {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT-A", "dest-tp": "201"}} + {"link-id": "L1", "source": {"source-node": "ONT1", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "200"}}, + {"link-id": "L2", "source": {"source-node": "ONT2", "source-tp": "500"}, "destination": {"dest-node": "OLT", "dest-tp": "201"}} ] } ] -- GitLab From 26e0d78390f12afe23fba0e5a89c3f1d09a03ed6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 15:51:28 +0000 Subject: [PATCH 137/367] ECOC F5GA Telemetry Demo: - Corrected E2E/Agg topology descriptor --- .../data/topology/topology-agg.json | 21 --------------------- .../data/topology/topology-e2e.json | 11 ++++++++--- 2 files changed, 8 insertions(+), 24 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index 1bdcc2643..58fe5010c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -26,17 +26,6 @@ "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, - {"device_id": {"device_uuid": 
{"uuid": "OLT-T"}}, "device_type": "emu-packet-router", - "device_drivers": ["DEVICEDRIVER_UNDEFINED"], - "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.58.10"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "lo", "name": "lo", "type": "loopback"}, - {"uuid": "500", "name": "500", "type": "copper"}, - {"uuid": "501", "name": "501", "type": "copper"} - ]}}} - ]}}, {"device_id": {"device_uuid": {"uuid": "POP1"}}, "device_type": "emu-packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ @@ -63,16 +52,6 @@ ]}} ], "links": [ - {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", - "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "OLT-T"}}, "endpoint_uuid": {"uuid": "501"}}, - {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : "LINKTYPE_COPPER", - "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, - {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} - ]}, {"link_id": {"link_uuid": {"uuid": "L13"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json index ec9fb580d..a1dddff25 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json @@ -28,10 +28,15 @@ ]}} ], "links": [ - {"link_id": {"link_uuid": {"uuid": "OLT-INT"}}, "link_type" : "LINKTYPE_COPPER", + {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "OLT-T"}}, "endpoint_uuid": {"uuid": "lo"}}, - {"device_id": {"device_uuid": {"uuid": "OLT-A"}}, "endpoint_uuid": {"uuid": "lo"}} + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} ]} ] } -- GitLab From 96eb65df5712b7477b1c351ca9f810a142256815 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 16:12:14 +0000 Subject: [PATCH 138/367] Slice component: - Fixed propagation of name from slice to service --- src/slice/service/SliceServiceServicerImpl.py | 1 + 1 file changed, 1 insertion(+) diff 
--git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index e4f9ebce8..aa87ef953 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -104,6 +104,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): # pylint: disable=no-member service_request = Service() service_request.service_id.CopyFrom(service_id) + service_request.name = slice_with_uuids.name service_request.service_type = ServiceTypeEnum.SERVICETYPE_UNKNOWN service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED service_client.CreateService(service_request) -- GitLab From be04d181e86c59366261d3278f59ee43991de5c1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 18:39:55 +0000 Subject: [PATCH 139/367] Service component - L3NM IETF Slice: - Reimplemented simplified version - Moved old version to old folder --- .../l3nm_ietfslice/DataStoreDelta.py | 48 + .../L3NM_IETFSlice_ServiceHandler.py | 953 +++--------------- .../service_handlers/l3nm_ietfslice/Tools.py | 63 ++ .../l3nm_ietfslice/{ => old}/ConfigRules.py | 0 .../old/L3NM_IETFSlice_ServiceHandler.py | 943 +++++++++++++++++ 5 files changed, 1168 insertions(+), 839 deletions(-) create mode 100644 src/service/service/service_handlers/l3nm_ietfslice/DataStoreDelta.py create mode 100644 src/service/service/service_handlers/l3nm_ietfslice/Tools.py rename src/service/service/service_handlers/l3nm_ietfslice/{ => old}/ConfigRules.py (100%) create mode 100644 src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py diff --git a/src/service/service/service_handlers/l3nm_ietfslice/DataStoreDelta.py b/src/service/service/service_handlers/l3nm_ietfslice/DataStoreDelta.py new file mode 100644 index 000000000..725de5c25 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ietfslice/DataStoreDelta.py @@ -0,0 +1,48 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import json +from deepdiff import DeepDiff +from typing import Dict, Optional +from common.proto.context_pb2 import Service + + +RUNNING_RESOURCE_KEY = 'running_ietf_slice' +CANDIDATE_RESOURCE_KEY = 'candidate_ietf_slice' + + +class DataStoreDelta: + def __init__(self, service : Service): + self._service = service + self._service_config = service.service_config + self._candidate_data = self._get_datastore_data(CANDIDATE_RESOURCE_KEY) + self._running_data = self._get_datastore_data(RUNNING_RESOURCE_KEY ) + + def _get_datastore_data(self, resource_key : str) -> Optional[Dict]: + for cr in self._service_config.config_rules: + if cr.WhichOneof('config_rule') != 'custom': continue + if cr.custom.resource_key != resource_key: continue + resource_value = json.loads(cr.custom.resource_value) + return resource_value.get('network-slice-services', dict()).get('slice-service') + return None + + @property + def candidate_data(self): return self._candidate_data + + @property + def running_data(self): return self._running_data + + def get_diff(self) -> Dict: + return DeepDiff(self._running_data, self._candidate_data) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index 161b520e2..e7dff7043 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -12,372 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json -import logging -import re -from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union - -from deepdiff import DeepDiff -from dataclasses import dataclass +import ipaddress, json, logging +from typing import Any, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method -from common.proto.context_pb2 import ConfigRule, DeviceId, Empty, Service, ServiceConfig +from common.proto.context_pb2 import ConfigRule, DeviceId, Service +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type -from context.client.ContextClient import ContextClient from service.service.service_handler_api._ServiceHandler import _ServiceHandler from service.service.service_handler_api.SettingsHandler import SettingsHandler -from service.service.service_handler_api.Tools import ( - get_device_endpoint_uuids, -) +from service.service.service_handler_api.Tools import get_device_endpoint_uuids from service.service.task_scheduler.TaskExecutor import TaskExecutor +from .DataStoreDelta import DataStoreDelta +from .Tools import get_device_endpoint_name -from .ConfigRules import ( - get_link_ep_device_names, - setup_config_rules, - teardown_config_rules, -) - -RUNNING_RESOURCE_KEY = "running_ietf_slice" -CANDIDATE_RESOURCE_KEY = "candidate_ietf_slice" - -SDP_DIFF_RE = re.compile( - r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'sdps\'\]\[\'sdp\'\]\[(\d)\]$" -) -CONNECTION_GROUP_DIFF_RE = re.compile( - r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'connection-groups\'\]\[\'connection-group\'\]\[(\d)\]$" -) -MATCH_CRITERION_DIFF_RE = re.compile( - 
r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'sdps\'\]\[\'sdp\'\]\[(\d)\]\[\'service-match-criteria\'\]\[\'match-criterion\'\]\[(\d)\]$" -) - -RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r"^\/interface\[([^\]]+)\].*") LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool( - "Service", "Handler", labels={"handler": "l3slice_ietfslice"} -) - - -RAISE_IF_DIFFERS = True - - -class Ipv4Info(TypedDict): - src_lan: str - dst_lan: str - src_port: str - dst_port: str - vlan: str - - -class DeviceEpInfo(TypedDict): - ipv4_info: Ipv4Info - node_name: str - endpoint_name: str - one_way_delay: int - one_way_bandwidth: int - one_way_packet_loss: float - - -@dataclass -class ConnectivityConstructInfo: - bandwidth: int = 0 - delay: int = 0 - packet_loss: float = 0.0 - - -def get_custom_config_rule( - service_config: ServiceConfig, resource_key: str -) -> Optional[ConfigRule]: - """ - Returns the ConfigRule from service_config matching the provided resource_key - if found, otherwise returns None. - """ - for cr in service_config.config_rules: - if ( - cr.WhichOneof("config_rule") == "custom" - and cr.custom.resource_key == resource_key - ): - return cr - return None - - -def get_running_candidate_ietf_slice_data_diff(service_config: ServiceConfig) -> Dict: - """ - Loads the JSON from the running/candidate resource ConfigRules and returns - their DeepDiff comparison. - """ - running_cr = get_custom_config_rule(service_config, RUNNING_RESOURCE_KEY) - candidate_cr = get_custom_config_rule(service_config, CANDIDATE_RESOURCE_KEY) - running_value_dict = json.loads(running_cr.custom.resource_value) - candidate_value_dict = json.loads(candidate_cr.custom.resource_value) - return DeepDiff(running_value_dict, candidate_value_dict) - - -def extract_match_criterion_ipv4_info(match_criterion: Dict) -> Ipv4Info: - """ - Extracts IPv4 info from the match criterion dictionary. - """ - src_lan = dst_lan = src_port = dst_port = vlan = "" - for type_value in match_criterion["match-type"]: - m_type = type_value["type"] - val = type_value["value"][0] - if m_type == "ietf-network-slice-service:source-ip-prefix": - src_lan = val - elif m_type == "ietf-network-slice-service:destination-ip-prefix": - dst_lan = val - elif m_type == "ietf-network-slice-service:source-tcp-port": - src_port = val - elif m_type == "ietf-network-slice-service:destination-tcp-port": - dst_port = val - elif m_type == "ietf-network-slice-service:vlan": - vlan = val - return Ipv4Info( - src_lan=src_lan, - dst_lan=dst_lan, - src_port=src_port, - dst_port=dst_port, - vlan=vlan, - ) - - -def get_removed_items( - candidate_ietf_slice_dict: dict, running_ietf_slice_dict: dict -) -> dict: - """ - For the 'iterable_item_removed' scenario, returns dict with removed sdp / connection_group / match_criterion info. - Raises an exception if there's inconsistent data or multiple items removed (which is not supported). 
- """ - removed_items = { - "sdp": {"sdp_idx": None, "value": {}}, - "connection_group": {"connection_group_idx": None, "value": {}}, - "match_criterion": { - "sdp_idx": None, - "match_criterion_idx": None, - "value": {}, - }, - } - - running_slice_services = running_ietf_slice_dict["network-slice-services"][ - "slice-service" - ][0] - candidate_slice_services = candidate_ietf_slice_dict["network-slice-services"][ - "slice-service" - ][0] - - running_slice_sdps = [sdp["id"] for sdp in running_slice_services["sdps"]["sdp"]] - candidiate_slice_sdps = [ - sdp["id"] for sdp in candidate_slice_services["sdps"]["sdp"] - ] - removed_sdps = set(running_slice_sdps) - set(candidiate_slice_sdps) - - if len(removed_sdps) > 1: - raise Exception("Multiple SDPs removed - not supported.") - removed_sdp_id = removed_sdps.pop() - - removed_items["sdp"]["sdp_idx"] = running_slice_sdps.index(removed_sdp_id) - removed_items["sdp"]["value"] = next( - sdp - for sdp in running_slice_services["sdps"]["sdp"] - if sdp["id"] == removed_sdp_id - ) - - match_criteria = removed_items["sdp"]["value"]["service-match-criteria"][ - "match-criterion" - ] - if len(match_criteria) > 1: - raise Exception("Multiple match criteria found - not supported") - match_criterion = match_criteria[0] - connection_grp_id = match_criterion["target-connection-group-id"] - connection_groups = running_slice_services["connection-groups"]["connection-group"] - connection_group = next( - (idx, cg) - for idx, cg in enumerate(connection_groups) - if cg["id"] == connection_grp_id - ) - removed_items["connection_group"]["connection_group_idx"] = connection_group[0] - removed_items["connection_group"]["value"] = connection_group[1] - - for sdp in running_slice_services["sdps"]["sdp"]: - if sdp["id"] == removed_sdp_id: - continue - for mc in sdp["service-match-criteria"]["match-criterion"]: - if mc["target-connection-group-id"] == connection_grp_id: - removed_items["match_criterion"]["sdp_idx"] = running_slice_sdps.index( - sdp["id"] - ) - removed_items["match_criterion"]["match_criterion_idx"] = sdp[ - "service-match-criteria" - ]["match-criterion"].index(mc) - removed_items["match_criterion"]["value"] = mc - break - - if ( - removed_items["match_criterion"]["sdp_idx"] is None - or removed_items["sdp"]["sdp_idx"] is None - or removed_items["connection_group"]["connection_group_idx"] is None - ): - raise Exception("sdp, connection group or match criterion not found") - return removed_items - - -def gather_connectivity_construct_info( - candidate_connection_groups: List[Dict], -) -> Dict[Tuple[str, str], ConnectivityConstructInfo]: - """ - Creates a dict mapping (sender_sdp, receiver_sdp) -> ConnectivityConstructInfo - from the given list of candidate connection groups. 
- """ - cc_info: Dict[Tuple[str, str], ConnectivityConstructInfo] = {} - for cg in candidate_connection_groups: - for cc in cg["connectivity-construct"]: - cc_sender = cc["p2p-sender-sdp"] - cc_receiver = cc["p2p-receiver-sdp"] - cc_key = (cc_sender, cc_receiver) - cc_info[cc_key] = ConnectivityConstructInfo() - for metric_bound in cc["service-slo-sle-policy"]["slo-policy"][ - "metric-bound" - ]: - if ( - metric_bound["metric-type"] - == "ietf-network-slice-service:one-way-delay-maximum" - and metric_bound["metric-unit"] == "milliseconds" - ): - cc_info[cc_key].delay = int(metric_bound["bound"]) - elif ( - metric_bound["metric-type"] - == "ietf-network-slice-service:two-way-packet-loss" - and metric_bound["metric-unit"] == "percentage" - ): - cc_info[cc_key].packet_loss = float( - metric_bound["percentile-value"] - ) - elif ( - metric_bound["metric-type"] - == "ietf-network-slice-service:one-way-bandwidth" - and metric_bound["metric-unit"] == "Mbps" - ): - cc_info[cc_key].bandwidth = int(metric_bound["bound"]) - return cc_info - - -def extract_source_destination_device_endpoint_info( - device_ep_pairs: list, connection_group: Dict, candidate_connection_groups: List -) -> Tuple[DeviceEpInfo, DeviceEpInfo]: - """ - Given device_ep_pairs, the relevant connection_group data, and all candidate - connection groups, figure out the final DeviceEpInfo for source and destination. - This includes computing the combined bandwidth, min delay, etc. - """ - connectivity_construct = connection_group["connectivity-construct"][0] - sender_sdp = connectivity_construct["p2p-sender-sdp"] - receiver_sdp = connectivity_construct["p2p-receiver-sdp"] - - # If the first pair is not the sender, we invert them - if sender_sdp == device_ep_pairs[0][4]: - ... - elif sender_sdp == device_ep_pairs[1][4]: - device_ep_pairs = device_ep_pairs[::-1] - else: - raise Exception("Sender SDP not found in device_ep_pairs") - - # Gather info from candidate connection groups - cc_info = gather_connectivity_construct_info(candidate_connection_groups) - - source_delay = int(1e6) - source_bandwidth = 0 - source_packet_loss = 1.0 - destination_delay = int(1e6) - destination_bandwidth = 0 - destination_packet_loss = 1.0 - - if cc_info: - common_sdps = set.intersection(*[set(key) for key in cc_info.keys()]) - if len(cc_info) > 2 and len(common_sdps) != 1: - raise Exception( - "There should be one common sdp in all connectivity constructs, otherwise, it is not supported" - ) - if len(common_sdps) == 1: - common_sdp = common_sdps.pop() - elif len(common_sdps) == 2: - # Fallback if intersection is 2 => pick sender_sdp - common_sdp = sender_sdp - else: - raise Exception("Invalid number of common sdps") - - for (sender, receiver), metrics in cc_info.items(): - cc_bandwidth = metrics.bandwidth - cc_max_delay = metrics.delay - cc_packet_loss = metrics.packet_loss - if sender == common_sdp: - source_bandwidth += cc_bandwidth - if cc_max_delay < source_delay: - source_delay = cc_max_delay - if cc_packet_loss < source_packet_loss: - source_packet_loss = cc_packet_loss - else: - destination_bandwidth += cc_bandwidth - if cc_max_delay < destination_delay: - destination_delay = cc_max_delay - if cc_packet_loss < destination_packet_loss: - destination_packet_loss = cc_packet_loss - - source_device_ep_info = DeviceEpInfo( - ipv4_info=device_ep_pairs[0][5], - node_name=device_ep_pairs[0][2], - endpoint_name=device_ep_pairs[0][3], - one_way_delay=source_delay, - one_way_bandwidth=source_bandwidth, - one_way_packet_loss=source_packet_loss, - ) - 
destination_device_ep_info = DeviceEpInfo( - ipv4_info=device_ep_pairs[1][5], - node_name=device_ep_pairs[1][2], - endpoint_name=device_ep_pairs[1][3], - one_way_delay=destination_delay, - one_way_bandwidth=destination_bandwidth, - one_way_packet_loss=destination_packet_loss, - ) - - return source_device_ep_info, destination_device_ep_info - - -def _parse_item_added(diff: Dict) -> dict: - """ - Helper to parse 'iterable_item_added' from the running_candidate_diff - and return the relevant items for sdp, connection_group, match_criterion, etc. - """ - added_items = { - "sdp": {"sdp_idx": None, "value": {}}, - "connection_group": {"connection_group_idx": None, "value": {}}, - "match_criterion": { - "sdp_idx": None, - "match_criterion_idx": None, - "value": {}, - }, - } - for added_key, added_value in diff["iterable_item_added"].items(): - sdp_match = SDP_DIFF_RE.match(added_key) - connection_group_match = CONNECTION_GROUP_DIFF_RE.match(added_key) - match_criterion_match = MATCH_CRITERION_DIFF_RE.match(added_key) - if sdp_match: - added_items["sdp"] = { - "sdp_idx": int(sdp_match.groups()[0]), - "value": added_value, - } - elif connection_group_match: - added_items["connection_group"] = { - "connection_group_idx": int(connection_group_match.groups()[0]), - "value": added_value, - } - elif match_criterion_match: - added_items["match_criterion"] = { - "sdp_idx": int(match_criterion_match.groups()[0]), - "match_criterion_idx": int(match_criterion_match.groups()[1]), - "value": added_value, - } - return added_items + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l3nm_ietfslice'}) class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): @@ -394,475 +48,108 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) -> List[Union[bool, Exception]]: - chk_type("endpoints", endpoints, list) - if len(endpoints) == 0: - return [] + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] results = [] try: - service_config = self.__service.service_config - # 1. Identify source and destination devices src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) - src_device_obj = self.__task_executor.get_device( - DeviceId(**json_device_id(src_device_uuid)) - ) + src_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) src_device_name = src_device_obj.name + src_endpoint_name = get_device_endpoint_name(src_device_obj, src_endpoint_uuid) src_controller = self.__task_executor.get_device_controller(src_device_obj) - dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids( - endpoints[-1] - ) - dst_device_obj = self.__task_executor.get_device( - DeviceId(**json_device_id(dst_device_uuid)) - ) + dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[-1]) + dst_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid))) dst_device_name = dst_device_obj.name + dst_endpoint_name = get_device_endpoint_name(dst_device_obj, dst_endpoint_uuid) dst_controller = self.__task_executor.get_device_controller(dst_device_obj) - if ( - src_controller.device_id.device_uuid.uuid - != dst_controller.device_id.device_uuid.uuid - ): - raise Exception("Different Src-Dst devices not supported by now") + # 2. 
Identify controller to be used + if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid: + raise Exception('Different Src-Dst devices not supported by now') controller = src_controller # same device controller - # 2. Determine how the candidate & running resources differ - running_candidate_diff = get_running_candidate_ietf_slice_data_diff( - service_config - ) - candidate_ietf_slice_cr = get_custom_config_rule( - service_config, CANDIDATE_RESOURCE_KEY - ) - candidate_resource_value_dict = json.loads( - candidate_ietf_slice_cr.custom.resource_value - ) - running_ietf_slice_cr = get_custom_config_rule( - service_config, RUNNING_RESOURCE_KEY - ) - running_resource_value_dict = json.loads( - running_ietf_slice_cr.custom.resource_value - ) - slice_name = running_resource_value_dict["network-slice-services"][ - "slice-service" - ][0]["id"] - - # 3. Retrieve the context links for matching endpoints - context_client = ContextClient() - links = context_client.ListLinks(Empty()).links - - device_ep_pairs = [] - sdp_ids = [] - target_connection_group_id = None - operation_type = "update" # default fallback - - # 4. Handle creation vs additions vs removals - if not running_candidate_diff: # Slice Creation - # 4a. New Slice Creation - operation_type = "create" - - candidate_slice_service = candidate_resource_value_dict[ - "network-slice-services" - ]["slice-service"][0] - full_connection_groups = candidate_slice_service["connection-groups"][ - "connection-group" - ] - sdps = candidate_slice_service["sdps"]["sdp"] - sdp_ids = [sdp["node-id"] for sdp in sdps] - - # figure out which device is connected to which link - edge_device_names = [src_device_name, dst_device_name] - for sdp in sdps: - node_id = sdp["node-id"] - for link in links: - info = get_link_ep_device_names(link, context_client) - dev1, ep1, _, dev2, ep2, _ = info - if dev1 == node_id and dev2 in edge_device_names: - edge_device_names.remove(dev2) - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - if len(match_criteria) != 1: - raise Exception( - "Only one match criteria allowed for initial slice creation" - ) - match_criterion = match_criteria[0] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - node_id, - ep1, - dev2, - ep2, - sdp["id"], - ipv4_info, - ) - ) - target_connection_group_id = match_criterion[ - "target-connection-group-id" - ] - sdp_ids.remove(node_id) - break - - # find the second link - if not edge_device_names: - raise Exception("Edge device names exhausted unexpectedly.") - - # second link logic - for link in links: - info = get_link_ep_device_names(link, context_client) - dev1, ep1, device_obj_1, dev2, ep2, device_obj_2 = info - if ( - dev1 == edge_device_names[0] - and device_obj_2.controller_id != device_obj_1.controller_id - ): - for sdp in sdps: - if sdp["node-id"] != sdp_ids[0]: - continue - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - if len(match_criteria) != 1: - raise Exception( - "Only one match criteria allowed for initial slice creation" - ) - match_criterion = match_criteria[0] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - dev2, - ep2, - dev1, - ep1, - sdp["id"], - ipv4_info, - ) - ) - break - else: - raise Exception("No matching sdp found for second link.") - break - else: - raise Exception("sdp between the domains not found") - - elif "iterable_item_added" in running_candidate_diff: # new SDP added - # 4b. 
A new SDP was added - operation_type = "update" - - candidate_slice_service = candidate_resource_value_dict[ - "network-slice-services" - ]["slice-service"][0] - sdps = candidate_slice_service["sdps"]["sdp"] - full_connection_groups = candidate_slice_service["connection-groups"][ - "connection-group" - ] - added_items = _parse_item_added(running_candidate_diff) - - new_sdp = sdps[added_items["sdp"]["sdp_idx"]] - src_sdp_name = new_sdp["node-id"] - dst_sdp_idx = sdps[added_items["match_criterion"]["sdp_idx"]]["id"] - dst_sdp_name = sdps[added_items["match_criterion"]["sdp_idx"]][ - "node-id" - ] - for link in links: - info = get_link_ep_device_names(link, context_client) - dev1, ep1, device_obj_1, dev2, ep2, device_obj_2 = info - if ( - dev1 == src_sdp_name - and device_obj_2.controller_id != device_obj_1.controller_id - ): - for sdp in sdps: - if sdp["node-id"] != src_sdp_name: - continue - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - if len(match_criteria) != 1: - raise Exception( - "Only one match criteria allowed for initial slice creation" - ) - match_criterion = match_criteria[0] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - dev2, - ep2, - dev1, - ep1, - sdp["id"], - ipv4_info, - ) - ) - target_connection_group_id = match_criterion[ - "target-connection-group-id" - ] - break - else: - raise Exception("sdp between the domains not found") - for link in links: - info = get_link_ep_device_names(link, context_client) - dev1, ep1, device_obj_1, dev2, ep2, device_obj_2 = info - if ( - dev1 == dst_sdp_name - and device_obj_2.controller_id != device_obj_1.controller_id - ): - for sdp in sdps: - if sdp["node-id"] != dst_sdp_name: - continue - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - vlan_id = set() - for match in match_criteria: - for type_value in match["match-type"]: - if ( - type_value["type"] - == "ietf-network-slice-service:vlan" - ): - vlan_id.add(type_value["value"][0]) - if len(vlan_id) != 1: - raise Exception( - "one vlan id found in SDP match criteria" - ) - match_criterion = match_criteria[ - added_items["match_criterion"]["match_criterion_idx"] - ] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - dev2, - ep2, - dev1, - ep1, - sdp["id"], - ipv4_info, - ) - ) - break - else: - raise Exception("sdp between the domains not found") - elif "iterable_item_removed" in running_candidate_diff: # an SDP removed - # 4c. 
An existing SDP was removed - operation_type = "update" - - slice_services = running_resource_value_dict["network-slice-services"][ - "slice-service" - ] - candidate_slice_services = candidate_resource_value_dict[ - "network-slice-services" - ]["slice-service"] - candidate_slice_service = candidate_slice_services[0] - slice_service = slice_services[0] - full_connection_groups = slice_service["connection-groups"][ - "connection-group" - ] - sdps = slice_service["sdps"]["sdp"] - removed_items = get_removed_items( - candidate_resource_value_dict, running_resource_value_dict - ) - new_sdp = sdps[removed_items["sdp"]["sdp_idx"]] - src_sdp_name = new_sdp["node-id"] - dst_sdp_idx = sdps[removed_items["match_criterion"]["sdp_idx"]]["id"] - dst_sdp_name = sdps[removed_items["match_criterion"]["sdp_idx"]][ - "node-id" - ] - for link in links: - ( - device_obj_name_1, - ep_name_1, - device_obj_1, - device_obj_name_2, - ep_name_2, - device_obj_2, - ) = get_link_ep_device_names(link, context_client) - if ( - device_obj_name_1 == src_sdp_name - and device_obj_2.controller_id != device_obj_1.controller_id - ): - for sdp in sdps: - if sdp["node-id"] != src_sdp_name: - continue - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - if len(match_criteria) != 1: - raise Exception( - "Only one match criteria allowed for new SDP addition" - ) - match_criterion = match_criteria[0] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - device_obj_name_2, - ep_name_2, - device_obj_name_1, - ep_name_1, - sdp["id"], - ipv4_info, - ) - ) - target_connection_group_id = match_criterion[ - "target-connection-group-id" - ] - break - else: - raise Exception("sdp between the domains not found") - for link in links: - ( - device_obj_name_1, - ep_name_1, - device_obj_1, - device_obj_name_2, - ep_name_2, - device_obj_2, - ) = get_link_ep_device_names(link, context_client) - if ( - device_obj_name_1 == dst_sdp_name - and device_obj_2.controller_id != device_obj_1.controller_id - ): - for sdp in sdps: - if sdp["node-id"] != dst_sdp_name: - continue - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - vlan_id = set() - for match in match_criteria: - for type_value in match["match-type"]: - if ( - type_value["type"] - == "ietf-network-slice-service:vlan" - ): - vlan_id.add(type_value["value"][0]) - if len(vlan_id) != 1: - raise Exception( - "one vlan id found in SDP match criteria" - ) - match_criterion = match_criteria[ - removed_items["match_criterion"]["match_criterion_idx"] - ] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - device_obj_name_2, - ep_name_2, - device_obj_name_1, - ep_name_1, - sdp["id"], - ipv4_info, - ) - ) - break - else: - raise Exception("sdp between the domains not found") + + # 3. Load DataStore configuration + datastore_delta = DataStoreDelta(self.__service) + candidate_slice = datastore_delta.candidate_data + + + # 4. 
Extract data from SDP matching src/dst node + candidate_sdps = candidate_slice.get('sdps', dict()).get('sdp', list()) + if len(candidate_sdps) != 2: + MSG = 'Unsupported number of SDPs[{:d}]({:s})' + raise Exception(MSG.format(len(candidate_sdps), str(candidate_sdps))) + + sdp_dict = {sdp['node-id'] : sdp for sdp in candidate_sdps} + if src_device_name in sdp_dict and dst_device_name not in sdp_dict: + src_device_sdp = sdp_dict.get(src_device_name) + + other_device_names = set(sdp_dict.keys()) + other_device_names.remove(src_device_name) + unneeded_sdp_id = other_device_names.pop() + + dst_device_sdp = sdp_dict.get(unneeded_sdp_id) + dst_device_sdp['node-id'] = dst_device_name + + try: + dst_mgmt_ip_address = str(ipaddress.ip_address(dst_device_name)) + except ValueError: + dst_mgmt_ip_address = '0.0.0.0' + dst_device_sdp['sdp-ip-address'] = dst_mgmt_ip_address + + dst_ac = dst_device_sdp['attachment-circuits']['attachment-circuit'][0] + dst_ac['id'] = 'AC {:s}'.format(str(dst_device_name)) + dst_ac['description'] = 'AC {:s}'.format(str(dst_device_name)) + dst_ac['ac-node-id'] = dst_device_name + dst_ac['ac-tp-id'] = dst_endpoint_name + + elif dst_device_name in sdp_dict and src_device_name not in sdp_dict: + dst_device_sdp = sdp_dict.get(dst_device_name) + + other_device_names = set(sdp_dict.keys()) + other_device_names.remove(dst_device_name) + unneeded_sdp_id = other_device_names.pop() + + src_device_sdp = sdp_dict.get(unneeded_sdp_id) + src_device_sdp['node-id'] = src_device_name + + try: + src_mgmt_ip_address = str(ipaddress.ip_address(src_device_name)) + except ValueError: + src_mgmt_ip_address = '0.0.0.0' + src_device_sdp['sdp-ip-address'] = src_mgmt_ip_address + + src_ac = src_device_sdp['attachment-circuits']['attachment-circuit'][0] + src_ac['id'] = 'AC {:s}'.format(str(src_device_name)) + src_ac['description'] = 'AC {:s}'.format(str(src_device_name)) + src_ac['ac-node-id'] = src_device_name + src_ac['ac-tp-id'] = src_endpoint_name + else: - raise Exception( - "transition from candidate to running info not supported" - ) - - candidate_connection_groups = candidate_slice_service["connection-groups"][ - "connection-group" - ] - - if ( - len( - candidate_resource_value_dict["network-slice-services"][ - "slice-service" - ][0]["connection-groups"]["connection-group"] - ) - == 0 - ): - # 5. If connection_groups is now empty => operation = delete - operation_type = "delete" - - # 6. Retrieve actual target connection_group from the full connection groups - if not target_connection_group_id: - raise Exception("No target_connection_group_id found.") - target_connection_group = next( - cg - for cg in full_connection_groups - if cg["id"] == target_connection_group_id - ) + MSG = 'Unsupported case: sdp_dict={:s} src_device_name={:s} dst_device_name={:s}' + raise Exception(MSG.format(str(sdp_dict), str(src_device_name), str(dst_device_name))) - # 7. 
Build source/destination device info - source_device_ep_info, destination_device_ep_info = ( - extract_source_destination_device_endpoint_info( - device_ep_pairs, - target_connection_group, - candidate_connection_groups, - ) - ) - resource_value_dict = { - "uuid": slice_name, - "operation_type": operation_type, - "src_node_id": source_device_ep_info["node_name"], - "src_mgmt_ip_address": source_device_ep_info["node_name"], - "src_ac_node_id": source_device_ep_info["node_name"], - "src_ac_ep_id": source_device_ep_info["endpoint_name"], - "src_vlan": source_device_ep_info["ipv4_info"]["vlan"], - "src_source_ip_prefix": source_device_ep_info["ipv4_info"]["src_lan"], - "src_source_tcp_port": source_device_ep_info["ipv4_info"]["src_port"], - "src_destination_ip_prefix": source_device_ep_info["ipv4_info"][ - "dst_lan" - ], - "src_destination_tcp_port": source_device_ep_info["ipv4_info"][ - "dst_port" - ], - "source_one_way_delay": source_device_ep_info["one_way_delay"], - "source_one_way_bandwidth": source_device_ep_info["one_way_bandwidth"], - "source_one_way_packet_loss": source_device_ep_info[ - "one_way_packet_loss" - ], - "dst_node_id": destination_device_ep_info["node_name"], - "dst_mgmt_ip_address": destination_device_ep_info["node_name"], - "dst_ac_node_id": destination_device_ep_info["node_name"], - "dst_ac_ep_id": destination_device_ep_info["endpoint_name"], - "dst_vlan": destination_device_ep_info["ipv4_info"]["vlan"], - "dst_source_ip_prefix": destination_device_ep_info["ipv4_info"][ - "src_lan" - ], - "dst_source_tcp_port": destination_device_ep_info["ipv4_info"][ - "src_port" - ], - "dst_destination_ip_prefix": destination_device_ep_info["ipv4_info"][ - "dst_lan" - ], - "dst_destination_tcp_port": destination_device_ep_info["ipv4_info"][ - "dst_port" - ], - "destination_one_way_delay": destination_device_ep_info[ - "one_way_delay" - ], - "destination_one_way_bandwidth": destination_device_ep_info[ - "one_way_bandwidth" - ], - "destination_one_way_packet_loss": destination_device_ep_info[ - "one_way_packet_loss" - ], - "slice_id": slice_name, - } - - # 9. 
Create config rules and configure device - json_config_rules = setup_config_rules(slice_name, resource_value_dict) - del controller.device_config.config_rules[:] - for jcr in json_config_rules: - controller.device_config.config_rules.append(ConfigRule(**jcr)) + conn_groups = candidate_slice.get('connection-groups', dict()) + slice_name = self.__service.name + slice_data_model = {'network-slice-services': {'slice-service': [{ + 'id': slice_name, + 'description': slice_name, + 'sdps': {'sdp': [src_device_sdp, dst_device_sdp]}, + 'connection-groups': conn_groups, + }]}} + + del controller.device_config.config_rules[:] + controller.device_config.config_rules.append(ConfigRule(**json_config_rule_set( + '/service[{:s}]/IETFSlice'.format(self.__service.name), slice_data_model + ))) self.__task_executor.configure_device(controller) - except Exception as e: # pylint: disable=broad-except - raise e + except Exception as e: + LOGGER.exception('Unable to handle Slice Setup') results.append(e) return results @@ -883,16 +170,14 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): DeviceId(**json_device_id(src_device_uuid)) ) controller = self.__task_executor.get_device_controller(src_device_obj) - json_config_rules = teardown_config_rules(service_uuid, {}) - if len(json_config_rules) > 0: - del controller.device_config.config_rules[:] - for json_config_rule in json_config_rules: - controller.device_config.config_rules.append( - ConfigRule(**json_config_rule) - ) - self.__task_executor.configure_device(controller) + del controller.device_config.config_rules[:] + controller.device_config.config_rules.append(ConfigRule(**json_config_rule_delete( + '/service[{:s}]/IETFSlice'.format(service_uuid), {} + ))) + self.__task_executor.configure_device(controller) results.append(True) - except Exception as e: # pylint: disable=broad-except + except Exception as e: + LOGGER.exception('Unable to handle Slice Tear Down') results.append(e) return results @@ -900,60 +185,50 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): def SetConstraint( self, constraints: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("constraints", constraints, list) - if len(constraints) == 0: - return [] + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] - msg = "[SetConstraint] Method not implemented. Constraints({:s}) are being ignored." - LOGGER.warning(msg.format(str(constraints))) + MSG = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(constraints))) return [True for _ in range(len(constraints))] @metered_subclass_method(METRICS_POOL) def DeleteConstraint( self, constraints: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("constraints", constraints, list) - if len(constraints) == 0: - return [] + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] - msg = "[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored." - LOGGER.warning(msg.format(str(constraints))) + MSG = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
+ LOGGER.warning(MSG.format(str(constraints))) return [True for _ in range(len(constraints))] @metered_subclass_method(METRICS_POOL) def SetConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - + chk_type('resources', resources, list) results = [] for resource in resources: try: resource_value = json.loads(resource[1]) self.__settings_handler.set(resource[0], resource_value) results.append(True) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception("Unable to SetConfig({:s})".format(str(resource))) + except Exception as e: + LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource))) results.append(e) - return results @metered_subclass_method(METRICS_POOL) def DeleteConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - + chk_type('resources', resources, list) results = [] for resource in resources: try: self.__settings_handler.delete(resource[0]) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception("Unable to DeleteConfig({:s})".format(str(resource))) + except Exception as e: + LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource))) results.append(e) - return results diff --git a/src/service/service/service_handlers/l3nm_ietfslice/Tools.py b/src/service/service/service_handlers/l3nm_ietfslice/Tools.py new file mode 100644 index 000000000..ce5c26e7a --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ietfslice/Tools.py @@ -0,0 +1,63 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass +from typing import Dict, TypedDict +from common.proto.context_pb2 import Device + + +def get_device_endpoint_name(device_obj : Device, endpoint_uuid : str) -> str: + ''' + Given a device object and an endpoint UUID, return the device endpoint name. + Raises an exception if not found. 
+ ''' + for d_ep in device_obj.device_endpoints: + if d_ep.endpoint_id.endpoint_uuid.uuid == endpoint_uuid: + return d_ep.name + + device_uuid = str(device_obj.device_id.device_uuid.uuid) + device_name = str(device_obj.name) + MSG = 'Device({:s},{:s})/Endpoint({:s}) not found' + raise Exception(MSG.format(device_uuid, device_name, str(endpoint_uuid))) + + +@dataclass +class Ipv4Info: + src_prefix : str = '' + dst_prefix : str = '' + src_port : str = '' + dst_port : str = '' + vlan : str = '' + + +def extract_match_criterion_ipv4_info(match_criterion : Dict) -> Ipv4Info: + ipv4_info = Ipv4Info() + + for type_value in match_criterion['match-type']: + match_type = type_value['type'] + value = type_value['value'][0] + + if match_type == 'ietf-network-slice-service:source-ip-prefix': + ipv4_info.src_prefix = value + elif match_type == 'ietf-network-slice-service:destination-ip-prefix': + ipv4_info.dst_prefix = value + elif match_type == 'ietf-network-slice-service:source-tcp-port': + ipv4_info.src_port = value + elif match_type == 'ietf-network-slice-service:destination-tcp-port': + ipv4_info.dst_port = value + elif match_type == 'ietf-network-slice-service:vlan': + ipv4_info.vlan = value + + return ipv4_info diff --git a/src/service/service/service_handlers/l3nm_ietfslice/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfslice/old/ConfigRules.py similarity index 100% rename from src/service/service/service_handlers/l3nm_ietfslice/ConfigRules.py rename to src/service/service/service_handlers/l3nm_ietfslice/old/ConfigRules.py diff --git a/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py new file mode 100644 index 000000000..1bef7e457 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py @@ -0,0 +1,943 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import logging +import re +from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union + +from deepdiff import DeepDiff +from dataclasses import dataclass + +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigRule, DeviceId, Empty, Service, ServiceConfig +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from context.client.ContextClient import ContextClient +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handler_api.Tools import ( + get_device_endpoint_uuids, +) +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +from .ConfigRules import ( + get_link_ep_device_names, + setup_config_rules, + teardown_config_rules, +) + +RUNNING_RESOURCE_KEY = "running_ietf_slice" +CANDIDATE_RESOURCE_KEY = "candidate_ietf_slice" + +SDP_DIFF_RE = re.compile( + r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'sdps\'\]\[\'sdp\'\]\[(\d)\]$" +) +CONNECTION_GROUP_DIFF_RE = re.compile( + r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'connection-groups\'\]\[\'connection-group\'\]\[(\d)\]$" +) +MATCH_CRITERION_DIFF_RE = re.compile( + r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'sdps\'\]\[\'sdp\'\]\[(\d)\]\[\'service-match-criteria\'\]\[\'match-criterion\'\]\[(\d)\]$" +) + +RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r"^\/interface\[([^\]]+)\].*") + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool( + "Service", "Handler", labels={"handler": "l3slice_ietfslice"} +) + + +RAISE_IF_DIFFERS = True + + +class Ipv4Info(TypedDict): + src_lan: str + dst_lan: str + src_port: str + dst_port: str + vlan: str + + +class DeviceEpInfo(TypedDict): + ipv4_info: Ipv4Info + node_name: str + endpoint_name: str + one_way_delay: int + one_way_bandwidth: int + one_way_packet_loss: float + + +@dataclass +class ConnectivityConstructInfo: + bandwidth: int = 0 + delay: int = 0 + packet_loss: float = 0.0 + + +def get_custom_config_rule( + service_config: ServiceConfig, resource_key: str +) -> Optional[ConfigRule]: + """ + Returns the ConfigRule from service_config matching the provided resource_key + if found, otherwise returns None. + """ + for cr in service_config.config_rules: + if ( + cr.WhichOneof("config_rule") == "custom" + and cr.custom.resource_key == resource_key + ): + return cr + return None + + +def get_running_candidate_ietf_slice_data_diff(service_config: ServiceConfig) -> Dict: + """ + Loads the JSON from the running/candidate resource ConfigRules and returns + their DeepDiff comparison. + """ + running_cr = get_custom_config_rule(service_config, RUNNING_RESOURCE_KEY) + candidate_cr = get_custom_config_rule(service_config, CANDIDATE_RESOURCE_KEY) + running_value_dict = json.loads(running_cr.custom.resource_value) + candidate_value_dict = json.loads(candidate_cr.custom.resource_value) + return DeepDiff(running_value_dict, candidate_value_dict) + + +def extract_match_criterion_ipv4_info(match_criterion: Dict) -> Ipv4Info: + """ + Extracts IPv4 info from the match criterion dictionary. 
+ """ + src_lan = dst_lan = src_port = dst_port = vlan = "" + for type_value in match_criterion["match-type"]: + m_type = type_value["type"] + val = type_value["value"][0] + if m_type == "ietf-network-slice-service:source-ip-prefix": + src_lan = val + elif m_type == "ietf-network-slice-service:destination-ip-prefix": + dst_lan = val + elif m_type == "ietf-network-slice-service:source-tcp-port": + src_port = val + elif m_type == "ietf-network-slice-service:destination-tcp-port": + dst_port = val + elif m_type == "ietf-network-slice-service:vlan": + vlan = val + return Ipv4Info( + src_lan=src_lan, + dst_lan=dst_lan, + src_port=src_port, + dst_port=dst_port, + vlan=vlan, + ) + + +def get_removed_items( + candidate_ietf_slice_dict: dict, running_ietf_slice_dict: dict +) -> dict: + """ + For the 'iterable_item_removed' scenario, returns dict with removed sdp / connection_group / match_criterion info. + Raises an exception if there's inconsistent data or multiple items removed (which is not supported). + """ + removed_items = { + "sdp": {"sdp_idx": None, "value": {}}, + "connection_group": {"connection_group_idx": None, "value": {}}, + "match_criterion": { + "sdp_idx": None, + "match_criterion_idx": None, + "value": {}, + }, + } + + running_slice_services = running_ietf_slice_dict["network-slice-services"][ + "slice-service" + ][0] + candidate_slice_services = candidate_ietf_slice_dict["network-slice-services"][ + "slice-service" + ][0] + + running_slice_sdps = [sdp["id"] for sdp in running_slice_services["sdps"]["sdp"]] + candidiate_slice_sdps = [ + sdp["id"] for sdp in candidate_slice_services["sdps"]["sdp"] + ] + removed_sdps = set(running_slice_sdps) - set(candidiate_slice_sdps) + + if len(removed_sdps) > 1: + raise Exception("Multiple SDPs removed - not supported.") + removed_sdp_id = removed_sdps.pop() + + removed_items["sdp"]["sdp_idx"] = running_slice_sdps.index(removed_sdp_id) + removed_items["sdp"]["value"] = next( + sdp + for sdp in running_slice_services["sdps"]["sdp"] + if sdp["id"] == removed_sdp_id + ) + + match_criteria = removed_items["sdp"]["value"]["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) > 1: + raise Exception("Multiple match criteria found - not supported") + match_criterion = match_criteria[0] + connection_grp_id = match_criterion["target-connection-group-id"] + connection_groups = running_slice_services["connection-groups"]["connection-group"] + connection_group = next( + (idx, cg) + for idx, cg in enumerate(connection_groups) + if cg["id"] == connection_grp_id + ) + removed_items["connection_group"]["connection_group_idx"] = connection_group[0] + removed_items["connection_group"]["value"] = connection_group[1] + + for sdp in running_slice_services["sdps"]["sdp"]: + if sdp["id"] == removed_sdp_id: + continue + for mc in sdp["service-match-criteria"]["match-criterion"]: + if mc["target-connection-group-id"] == connection_grp_id: + removed_items["match_criterion"]["sdp_idx"] = running_slice_sdps.index( + sdp["id"] + ) + removed_items["match_criterion"]["match_criterion_idx"] = sdp[ + "service-match-criteria" + ]["match-criterion"].index(mc) + removed_items["match_criterion"]["value"] = mc + break + + if ( + removed_items["match_criterion"]["sdp_idx"] is None + or removed_items["sdp"]["sdp_idx"] is None + or removed_items["connection_group"]["connection_group_idx"] is None + ): + raise Exception("sdp, connection group or match criterion not found") + return removed_items + + +def gather_connectivity_construct_info( + 
candidate_connection_groups: List[Dict], +) -> Dict[Tuple[str, str], ConnectivityConstructInfo]: + """ + Creates a dict mapping (sender_sdp, receiver_sdp) -> ConnectivityConstructInfo + from the given list of candidate connection groups. + """ + cc_info: Dict[Tuple[str, str], ConnectivityConstructInfo] = {} + for cg in candidate_connection_groups: + for cc in cg["connectivity-construct"]: + cc_sender = cc["p2p-sender-sdp"] + cc_receiver = cc["p2p-receiver-sdp"] + cc_key = (cc_sender, cc_receiver) + cc_info[cc_key] = ConnectivityConstructInfo() + for metric_bound in cc["service-slo-sle-policy"]["slo-policy"][ + "metric-bound" + ]: + if ( + metric_bound["metric-type"] + == "ietf-network-slice-service:one-way-delay-maximum" + and metric_bound["metric-unit"] == "milliseconds" + ): + cc_info[cc_key].delay = int(metric_bound["bound"]) + elif ( + metric_bound["metric-type"] + == "ietf-network-slice-service:two-way-packet-loss" + and metric_bound["metric-unit"] == "percentage" + ): + cc_info[cc_key].packet_loss = float( + metric_bound["percentile-value"] + ) + elif ( + metric_bound["metric-type"] + == "ietf-network-slice-service:one-way-bandwidth" + and metric_bound["metric-unit"] == "Mbps" + ): + cc_info[cc_key].bandwidth = int(metric_bound["bound"]) + return cc_info + + +def extract_source_destination_device_endpoint_info( + device_ep_pairs: list, connection_group: Dict, candidate_connection_groups: List +) -> Tuple[DeviceEpInfo, DeviceEpInfo]: + """ + Given device_ep_pairs, the relevant connection_group data, and all candidate + connection groups, figure out the final DeviceEpInfo for source and destination. + This includes computing the combined bandwidth, min delay, etc. + """ + connectivity_construct = connection_group["connectivity-construct"][0] + sender_sdp = connectivity_construct["p2p-sender-sdp"] + receiver_sdp = connectivity_construct["p2p-receiver-sdp"] + + # If the first pair is not the sender, we invert them + if sender_sdp == device_ep_pairs[0][4]: + ... 
+ elif sender_sdp == device_ep_pairs[1][4]: + device_ep_pairs = device_ep_pairs[::-1] + else: + raise Exception("Sender SDP not found in device_ep_pairs") + + # Gather info from candidate connection groups + cc_info = gather_connectivity_construct_info(candidate_connection_groups) + + source_delay = int(1e6) + source_bandwidth = 0 + source_packet_loss = 1.0 + destination_delay = int(1e6) + destination_bandwidth = 0 + destination_packet_loss = 1.0 + + if cc_info: + common_sdps = set.intersection(*[set(key) for key in cc_info.keys()]) + if len(cc_info) > 2 and len(common_sdps) != 1: + raise Exception( + "There should be one common sdp in all connectivity constructs, otherwise, it is not supported" + ) + if len(common_sdps) == 1: + common_sdp = common_sdps.pop() + elif len(common_sdps) == 2: + # Fallback if intersection is 2 => pick sender_sdp + common_sdp = sender_sdp + else: + raise Exception("Invalid number of common sdps") + + for (sender, receiver), metrics in cc_info.items(): + cc_bandwidth = metrics.bandwidth + cc_max_delay = metrics.delay + cc_packet_loss = metrics.packet_loss + if sender == common_sdp: + source_bandwidth += cc_bandwidth + if cc_max_delay < source_delay: + source_delay = cc_max_delay + if cc_packet_loss < source_packet_loss: + source_packet_loss = cc_packet_loss + else: + destination_bandwidth += cc_bandwidth + if cc_max_delay < destination_delay: + destination_delay = cc_max_delay + if cc_packet_loss < destination_packet_loss: + destination_packet_loss = cc_packet_loss + + source_device_ep_info = DeviceEpInfo( + ipv4_info=device_ep_pairs[0][5], + node_name=device_ep_pairs[0][2], + endpoint_name=device_ep_pairs[0][3], + one_way_delay=source_delay, + one_way_bandwidth=source_bandwidth, + one_way_packet_loss=source_packet_loss, + ) + destination_device_ep_info = DeviceEpInfo( + ipv4_info=device_ep_pairs[1][5], + node_name=device_ep_pairs[1][2], + endpoint_name=device_ep_pairs[1][3], + one_way_delay=destination_delay, + one_way_bandwidth=destination_bandwidth, + one_way_packet_loss=destination_packet_loss, + ) + + return source_device_ep_info, destination_device_ep_info + + +def _parse_item_added(diff: Dict) -> dict: + """ + Helper to parse 'iterable_item_added' from the running_candidate_diff + and return the relevant items for sdp, connection_group, match_criterion, etc. 
+ """ + added_items = { + "sdp": {"sdp_idx": None, "value": {}}, + "connection_group": {"connection_group_idx": None, "value": {}}, + "match_criterion": { + "sdp_idx": None, + "match_criterion_idx": None, + "value": {}, + }, + } + for added_key, added_value in diff["iterable_item_added"].items(): + sdp_match = SDP_DIFF_RE.match(added_key) + connection_group_match = CONNECTION_GROUP_DIFF_RE.match(added_key) + match_criterion_match = MATCH_CRITERION_DIFF_RE.match(added_key) + if sdp_match: + added_items["sdp"] = { + "sdp_idx": int(sdp_match.groups()[0]), + "value": added_value, + } + elif connection_group_match: + added_items["connection_group"] = { + "connection_group_idx": int(connection_group_match.groups()[0]), + "value": added_value, + } + elif match_criterion_match: + added_items["match_criterion"] = { + "sdp_idx": int(match_criterion_match.groups()[0]), + "match_criterion_idx": int(match_criterion_match.groups()[1]), + "value": added_value, + } + return added_items + + +class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service: Service, task_executor: TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(service.service_config, **settings) + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, + endpoints: List[Tuple[str, str, Optional[str]]], + connection_uuid: Optional[str] = None, + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: + return [] + + results = [] + try: + service_config = self.__service.service_config + + # 1. Identify source and destination devices + src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) + src_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) + src_device_name = src_device_obj.name + src_controller = self.__task_executor.get_device_controller(src_device_obj) + + dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[-1]) + dst_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid))) + dst_device_name = dst_device_obj.name + dst_controller = self.__task_executor.get_device_controller(dst_device_obj) + + if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid: + raise Exception("Different Src-Dst devices not supported by now") + + controller = src_controller # same device controller + + # 2. Determine how the candidate & running resources differ + running_candidate_diff = get_running_candidate_ietf_slice_data_diff(service_config) + + candidate_ietf_slice_cr = get_custom_config_rule(service_config, CANDIDATE_RESOURCE_KEY) + candidate_resource_value_dict = json.loads(candidate_ietf_slice_cr.custom.resource_value) + + running_ietf_slice_cr = get_custom_config_rule(service_config, RUNNING_RESOURCE_KEY) + running_resource_value_dict = json.loads(running_ietf_slice_cr.custom.resource_value) + + slice_name = running_resource_value_dict["network-slice-services"]["slice-service"][0]["id"] + + # 3. Retrieve the context links for matching endpoints + context_client = ContextClient() + links = context_client.ListLinks(Empty()).links + + device_ep_pairs = [] + sdp_ids = [] + target_connection_group_id = None + operation_type = "update" # default fallback + + # 4. Handle creation vs additions vs removals + if not running_candidate_diff: # Slice Creation + # 4a. 
New Slice Creation + operation_type = "create" + + candidate_slice_service = candidate_resource_value_dict[ + "network-slice-services" + ]["slice-service"][0] + full_connection_groups = candidate_slice_service["connection-groups"][ + "connection-group" + ] + sdps = candidate_slice_service["sdps"]["sdp"] + sdp_ids = [sdp["node-id"] for sdp in sdps] + + # figure out which device is connected to which link + edge_device_names = [src_device_name, dst_device_name] + for sdp in sdps: + node_id = sdp["node-id"] + for link in links: + info = get_link_ep_device_names(link, context_client) + dev1, ep1, _, dev2, ep2, _ = info + if dev1 != node_id: continue + if dev2 not in edge_device_names: continue + + edge_device_names.remove(dev2) + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) != 1: + raise Exception( + "Only one match criteria allowed for initial slice creation" + ) + match_criterion = match_criteria[0] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion + ) + device_ep_pairs.append( + ( + node_id, + ep1, + dev2, + ep2, + sdp["id"], + ipv4_info, + ) + ) + target_connection_group_id = match_criterion[ + "target-connection-group-id" + ] + sdp_ids.remove(node_id) + break + + # find the second link + if not edge_device_names: + raise Exception("Edge device names exhausted unexpectedly.") + + # second link logic + for link in links: + info = get_link_ep_device_names(link, context_client) + dev1, ep1, device_obj_1, dev2, ep2, device_obj_2 = info + if ( + dev1 == edge_device_names[0] + and device_obj_2.controller_id != device_obj_1.controller_id + ): + for sdp in sdps: + if sdp["node-id"] != sdp_ids[0]: + continue + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) != 1: + raise Exception( + "Only one match criteria allowed for initial slice creation" + ) + match_criterion = match_criteria[0] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion + ) + device_ep_pairs.append( + ( + dev2, + ep2, + dev1, + ep1, + sdp["id"], + ipv4_info, + ) + ) + break + else: + raise Exception("No matching sdp found for second link.") + break + else: + raise Exception("sdp between the domains not found") + + elif "iterable_item_added" in running_candidate_diff: # new SDP added + # 4b. 
A new SDP was added + operation_type = "update" + + candidate_slice_service = candidate_resource_value_dict[ + "network-slice-services" + ]["slice-service"][0] + sdps = candidate_slice_service["sdps"]["sdp"] + full_connection_groups = candidate_slice_service["connection-groups"][ + "connection-group" + ] + added_items = _parse_item_added(running_candidate_diff) + + new_sdp = sdps[added_items["sdp"]["sdp_idx"]] + src_sdp_name = new_sdp["node-id"] + dst_sdp_idx = sdps[added_items["match_criterion"]["sdp_idx"]]["id"] + dst_sdp_name = sdps[added_items["match_criterion"]["sdp_idx"]][ + "node-id" + ] + for link in links: + info = get_link_ep_device_names(link, context_client) + dev1, ep1, device_obj_1, dev2, ep2, device_obj_2 = info + if ( + dev1 == src_sdp_name + and device_obj_2.controller_id != device_obj_1.controller_id + ): + for sdp in sdps: + if sdp["node-id"] != src_sdp_name: + continue + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) != 1: + raise Exception( + "Only one match criteria allowed for initial slice creation" + ) + match_criterion = match_criteria[0] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion + ) + device_ep_pairs.append( + ( + dev2, + ep2, + dev1, + ep1, + sdp["id"], + ipv4_info, + ) + ) + target_connection_group_id = match_criterion[ + "target-connection-group-id" + ] + break + else: + raise Exception("sdp between the domains not found") + for link in links: + info = get_link_ep_device_names(link, context_client) + dev1, ep1, device_obj_1, dev2, ep2, device_obj_2 = info + if ( + dev1 == dst_sdp_name + and device_obj_2.controller_id != device_obj_1.controller_id + ): + for sdp in sdps: + if sdp["node-id"] != dst_sdp_name: + continue + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + vlan_id = set() + for match in match_criteria: + for type_value in match["match-type"]: + if ( + type_value["type"] + == "ietf-network-slice-service:vlan" + ): + vlan_id.add(type_value["value"][0]) + if len(vlan_id) != 1: + raise Exception( + "one vlan id found in SDP match criteria" + ) + match_criterion = match_criteria[ + added_items["match_criterion"]["match_criterion_idx"] + ] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion + ) + device_ep_pairs.append( + ( + dev2, + ep2, + dev1, + ep1, + sdp["id"], + ipv4_info, + ) + ) + break + else: + raise Exception("sdp between the domains not found") + elif "iterable_item_removed" in running_candidate_diff: # an SDP removed + # 4c. 
An existing SDP was removed + operation_type = "update" + + slice_services = running_resource_value_dict["network-slice-services"][ + "slice-service" + ] + candidate_slice_services = candidate_resource_value_dict[ + "network-slice-services" + ]["slice-service"] + candidate_slice_service = candidate_slice_services[0] + slice_service = slice_services[0] + full_connection_groups = slice_service["connection-groups"][ + "connection-group" + ] + sdps = slice_service["sdps"]["sdp"] + removed_items = get_removed_items( + candidate_resource_value_dict, running_resource_value_dict + ) + new_sdp = sdps[removed_items["sdp"]["sdp_idx"]] + src_sdp_name = new_sdp["node-id"] + dst_sdp_idx = sdps[removed_items["match_criterion"]["sdp_idx"]]["id"] + dst_sdp_name = sdps[removed_items["match_criterion"]["sdp_idx"]][ + "node-id" + ] + for link in links: + ( + device_obj_name_1, + ep_name_1, + device_obj_1, + device_obj_name_2, + ep_name_2, + device_obj_2, + ) = get_link_ep_device_names(link, context_client) + if ( + device_obj_name_1 == src_sdp_name + and device_obj_2.controller_id != device_obj_1.controller_id + ): + for sdp in sdps: + if sdp["node-id"] != src_sdp_name: + continue + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) != 1: + raise Exception( + "Only one match criteria allowed for new SDP addition" + ) + match_criterion = match_criteria[0] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion + ) + device_ep_pairs.append( + ( + device_obj_name_2, + ep_name_2, + device_obj_name_1, + ep_name_1, + sdp["id"], + ipv4_info, + ) + ) + target_connection_group_id = match_criterion[ + "target-connection-group-id" + ] + break + else: + raise Exception("sdp between the domains not found") + for link in links: + ( + device_obj_name_1, + ep_name_1, + device_obj_1, + device_obj_name_2, + ep_name_2, + device_obj_2, + ) = get_link_ep_device_names(link, context_client) + if ( + device_obj_name_1 == dst_sdp_name + and device_obj_2.controller_id != device_obj_1.controller_id + ): + for sdp in sdps: + if sdp["node-id"] != dst_sdp_name: + continue + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + vlan_id = set() + for match in match_criteria: + for type_value in match["match-type"]: + if ( + type_value["type"] + == "ietf-network-slice-service:vlan" + ): + vlan_id.add(type_value["value"][0]) + if len(vlan_id) != 1: + raise Exception( + "one vlan id found in SDP match criteria" + ) + match_criterion = match_criteria[ + removed_items["match_criterion"]["match_criterion_idx"] + ] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion + ) + device_ep_pairs.append( + ( + device_obj_name_2, + ep_name_2, + device_obj_name_1, + ep_name_1, + sdp["id"], + ipv4_info, + ) + ) + break + else: + raise Exception("sdp between the domains not found") + else: + raise Exception( + "transition from candidate to running info not supported" + ) + + candidate_connection_groups = candidate_slice_service["connection-groups"][ + "connection-group" + ] + + if ( + len( + candidate_resource_value_dict["network-slice-services"][ + "slice-service" + ][0]["connection-groups"]["connection-group"] + ) + == 0 + ): + # 5. If connection_groups is now empty => operation = delete + operation_type = "delete" + + # 6. 
Retrieve actual target connection_group from the full connection groups + if not target_connection_group_id: + raise Exception("No target_connection_group_id found.") + target_connection_group = next( + cg + for cg in full_connection_groups + if cg["id"] == target_connection_group_id + ) + + # 7. Build source/destination device info + source_device_ep_info, destination_device_ep_info = ( + extract_source_destination_device_endpoint_info( + device_ep_pairs, + target_connection_group, + candidate_connection_groups, + ) + ) + resource_value_dict = { + "uuid": slice_name, + "operation_type": operation_type, + "src_node_id": source_device_ep_info["node_name"], + "src_mgmt_ip_address": source_device_ep_info["node_name"], + "src_ac_node_id": source_device_ep_info["node_name"], + "src_ac_ep_id": source_device_ep_info["endpoint_name"], + "src_vlan": source_device_ep_info["ipv4_info"]["vlan"], + "src_source_ip_prefix": source_device_ep_info["ipv4_info"]["src_lan"], + "src_source_tcp_port": source_device_ep_info["ipv4_info"]["src_port"], + "src_destination_ip_prefix": source_device_ep_info["ipv4_info"][ + "dst_lan" + ], + "src_destination_tcp_port": source_device_ep_info["ipv4_info"][ + "dst_port" + ], + "source_one_way_delay": source_device_ep_info["one_way_delay"], + "source_one_way_bandwidth": source_device_ep_info["one_way_bandwidth"], + "source_one_way_packet_loss": source_device_ep_info[ + "one_way_packet_loss" + ], + "dst_node_id": destination_device_ep_info["node_name"], + "dst_mgmt_ip_address": destination_device_ep_info["node_name"], + "dst_ac_node_id": destination_device_ep_info["node_name"], + "dst_ac_ep_id": destination_device_ep_info["endpoint_name"], + "dst_vlan": destination_device_ep_info["ipv4_info"]["vlan"], + "dst_source_ip_prefix": destination_device_ep_info["ipv4_info"][ + "src_lan" + ], + "dst_source_tcp_port": destination_device_ep_info["ipv4_info"][ + "src_port" + ], + "dst_destination_ip_prefix": destination_device_ep_info["ipv4_info"][ + "dst_lan" + ], + "dst_destination_tcp_port": destination_device_ep_info["ipv4_info"][ + "dst_port" + ], + "destination_one_way_delay": destination_device_ep_info[ + "one_way_delay" + ], + "destination_one_way_bandwidth": destination_device_ep_info[ + "one_way_bandwidth" + ], + "destination_one_way_packet_loss": destination_device_ep_info[ + "one_way_packet_loss" + ], + "slice_id": slice_name, + } + + # 9. 
Create config rules and configure device + json_config_rules = setup_config_rules(slice_name, resource_value_dict) + del controller.device_config.config_rules[:] + for jcr in json_config_rules: + controller.device_config.config_rules.append(ConfigRule(**jcr)) + + self.__task_executor.configure_device(controller) + except Exception as e: # pylint: disable=broad-except + raise e + results.append(e) + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, + endpoints: List[Tuple[str, str, Optional[str]]], + connection_uuid: Optional[str] = None, + ) -> List[Union[bool, Exception]]: + chk_type("endpoints", endpoints, list) + if len(endpoints) == 0: + return [] + service_uuid = self.__service.service_id.service_uuid.uuid + results = [] + try: + src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) + src_device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(src_device_uuid)) + ) + controller = self.__task_executor.get_device_controller(src_device_obj) + json_config_rules = teardown_config_rules(service_uuid, {}) + if len(json_config_rules) > 0: + del controller.device_config.config_rules[:] + for json_config_rule in json_config_rules: + controller.device_config.config_rules.append( + ConfigRule(**json_config_rule) + ) + self.__task_executor.configure_device(controller) + results.append(True) + except Exception as e: # pylint: disable=broad-except + results.append(e) + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint( + self, constraints: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("constraints", constraints, list) + if len(constraints) == 0: + return [] + + msg = "[SetConstraint] Method not implemented. Constraints({:s}) are being ignored." + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint( + self, constraints: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("constraints", constraints, list) + if len(constraints) == 0: + return [] + + msg = "[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored." 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("resources", resources, list) + if len(resources) == 0: + return [] + + results = [] + for resource in resources: + try: + resource_value = json.loads(resource[1]) + self.__settings_handler.set(resource[0], resource_value) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception("Unable to SetConfig({:s})".format(str(resource))) + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("resources", resources, list) + if len(resources) == 0: + return [] + + results = [] + for resource in resources: + try: + self.__settings_handler.delete(resource[0]) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception("Unable to DeleteConfig({:s})".format(str(resource))) + results.append(e) + + return results -- GitLab From dcf7dbd2839d6a8171cbc14c6b55149ef90b37b9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 18:40:40 +0000 Subject: [PATCH 140/367] Service component - L3NM IETF Slice: - Reverted changes in original version --- .../old/L3NM_IETFSlice_ServiceHandler.py | 100 ++++++++++-------- 1 file changed, 58 insertions(+), 42 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py index 1bef7e457..161b520e2 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/old/L3NM_IETFSlice_ServiceHandler.py @@ -394,7 +394,7 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) -> List[Union[bool, Exception]]: - chk_type('endpoints', endpoints, list) + chk_type("endpoints", endpoints, list) if len(endpoints) == 0: return [] @@ -404,30 +404,48 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): # 1. Identify source and destination devices src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) - src_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) + src_device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(src_device_uuid)) + ) src_device_name = src_device_obj.name src_controller = self.__task_executor.get_device_controller(src_device_obj) - dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[-1]) - dst_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid))) + dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids( + endpoints[-1] + ) + dst_device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(dst_device_uuid)) + ) dst_device_name = dst_device_obj.name dst_controller = self.__task_executor.get_device_controller(dst_device_obj) - if src_controller.device_id.device_uuid.uuid != dst_controller.device_id.device_uuid.uuid: + if ( + src_controller.device_id.device_uuid.uuid + != dst_controller.device_id.device_uuid.uuid + ): raise Exception("Different Src-Dst devices not supported by now") controller = src_controller # same device controller # 2. 
Determine how the candidate & running resources differ - running_candidate_diff = get_running_candidate_ietf_slice_data_diff(service_config) - - candidate_ietf_slice_cr = get_custom_config_rule(service_config, CANDIDATE_RESOURCE_KEY) - candidate_resource_value_dict = json.loads(candidate_ietf_slice_cr.custom.resource_value) - - running_ietf_slice_cr = get_custom_config_rule(service_config, RUNNING_RESOURCE_KEY) - running_resource_value_dict = json.loads(running_ietf_slice_cr.custom.resource_value) - - slice_name = running_resource_value_dict["network-slice-services"]["slice-service"][0]["id"] + running_candidate_diff = get_running_candidate_ietf_slice_data_diff( + service_config + ) + candidate_ietf_slice_cr = get_custom_config_rule( + service_config, CANDIDATE_RESOURCE_KEY + ) + candidate_resource_value_dict = json.loads( + candidate_ietf_slice_cr.custom.resource_value + ) + running_ietf_slice_cr = get_custom_config_rule( + service_config, RUNNING_RESOURCE_KEY + ) + running_resource_value_dict = json.loads( + running_ietf_slice_cr.custom.resource_value + ) + slice_name = running_resource_value_dict["network-slice-services"][ + "slice-service" + ][0]["id"] # 3. Retrieve the context links for matching endpoints context_client = ContextClient() @@ -459,36 +477,34 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): for link in links: info = get_link_ep_device_names(link, context_client) dev1, ep1, _, dev2, ep2, _ = info - if dev1 != node_id: continue - if dev2 not in edge_device_names: continue - - edge_device_names.remove(dev2) - match_criteria = sdp["service-match-criteria"][ - "match-criterion" - ] - if len(match_criteria) != 1: - raise Exception( - "Only one match criteria allowed for initial slice creation" + if dev1 == node_id and dev2 in edge_device_names: + edge_device_names.remove(dev2) + match_criteria = sdp["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) != 1: + raise Exception( + "Only one match criteria allowed for initial slice creation" + ) + match_criterion = match_criteria[0] + ipv4_info = extract_match_criterion_ipv4_info( + match_criterion ) - match_criterion = match_criteria[0] - ipv4_info = extract_match_criterion_ipv4_info( - match_criterion - ) - device_ep_pairs.append( - ( - node_id, - ep1, - dev2, - ep2, - sdp["id"], - ipv4_info, + device_ep_pairs.append( + ( + node_id, + ep1, + dev2, + ep2, + sdp["id"], + ipv4_info, + ) ) - ) - target_connection_group_id = match_criterion[ - "target-connection-group-id" - ] - sdp_ids.remove(node_id) - break + target_connection_group_id = match_criterion[ + "target-connection-group-id" + ] + sdp_ids.remove(node_id) + break # find the second link if not edge_device_names: -- GitLab From e43f5c6fe727c4d17e1ed207cb4d9766a64dd426 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 18:48:12 +0000 Subject: [PATCH 141/367] Service component - L3NM IETF Slice: - Bug fix --- .../l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index e7dff7043..8897a01b0 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -77,6 +77,11 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): datastore_delta = DataStoreDelta(self.__service) candidate_slice = 
datastore_delta.candidate_data + if len(candidate_slice) != 1: + MSG = 'Unsupported number of Slices[{:d}]({:s})' + raise Exception(MSG.format(len(candidate_slice), str(candidate_slice))) + candidate_slice = candidate_slice[0] + # 4. Extract data from SDP matching src/dst node candidate_sdps = candidate_slice.get('sdps', dict()).get('sdp', list()) -- GitLab From b0d4c140bbee833a64770115d0e79704731f8d7a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 4 Sep 2025 19:03:04 +0000 Subject: [PATCH 142/367] Service component - L3NM IETF Slice: - Bug fixes --- .../L3NM_IETFSlice_ServiceHandler.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index 8897a01b0..01c08188d 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -82,8 +82,12 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): raise Exception(MSG.format(len(candidate_slice), str(candidate_slice))) candidate_slice = candidate_slice[0] + slice_name = candidate_slice['id'] - # 4. Extract data from SDP matching src/dst node + conn_groups = candidate_slice.get('connection-groups', dict()) + + + # 4. Adapt SDPs candidate_sdps = candidate_slice.get('sdps', dict()).get('sdp', list()) if len(candidate_sdps) != 2: MSG = 'Unsupported number of SDPs[{:d}]({:s})' @@ -104,7 +108,7 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): dst_mgmt_ip_address = str(ipaddress.ip_address(dst_device_name)) except ValueError: dst_mgmt_ip_address = '0.0.0.0' - dst_device_sdp['sdp-ip-address'] = dst_mgmt_ip_address + dst_device_sdp['sdp-ip-address'] = [dst_mgmt_ip_address] dst_ac = dst_device_sdp['attachment-circuits']['attachment-circuit'][0] dst_ac['id'] = 'AC {:s}'.format(str(dst_device_name)) @@ -126,7 +130,7 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): src_mgmt_ip_address = str(ipaddress.ip_address(src_device_name)) except ValueError: src_mgmt_ip_address = '0.0.0.0' - src_device_sdp['sdp-ip-address'] = src_mgmt_ip_address + src_device_sdp['sdp-ip-address'] = [src_mgmt_ip_address] src_ac = src_device_sdp['attachment-circuits']['attachment-circuit'][0] src_ac['id'] = 'AC {:s}'.format(str(src_device_name)) @@ -138,9 +142,10 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): MSG = 'Unsupported case: sdp_dict={:s} src_device_name={:s} dst_device_name={:s}' raise Exception(MSG.format(str(sdp_dict), str(src_device_name), str(dst_device_name))) - conn_groups = candidate_slice.get('connection-groups', dict()) + + + # 5. 
Compose slice and setup it - slice_name = self.__service.name slice_data_model = {'network-slice-services': {'slice-service': [{ 'id': slice_name, 'description': slice_name, -- GitLab From d7826918e8260f6ed0d0af4dd618d91aba6cbc2a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 08:11:57 +0000 Subject: [PATCH 143/367] Device component - IETF Slice: - Simplified code and removed unneeded files - Moved old code to separate folder - Multiple bug fixes --- .../service/drivers/ietf_slice/Constants.py | 25 -- .../drivers/ietf_slice/IetfSliceDriver.py | 156 ++++------ .../drivers/ietf_slice/TfsApiClient.py | 23 ++ .../drivers/ietf_slice/old/IetfSliceDriver.py | 268 ++++++++++++++++++ .../drivers/ietf_slice/{ => old}/Tools.py | 2 +- 5 files changed, 340 insertions(+), 134 deletions(-) delete mode 100644 src/device/service/drivers/ietf_slice/Constants.py create mode 100644 src/device/service/drivers/ietf_slice/old/IetfSliceDriver.py rename src/device/service/drivers/ietf_slice/{ => old}/Tools.py (99%) diff --git a/src/device/service/drivers/ietf_slice/Constants.py b/src/device/service/drivers/ietf_slice/Constants.py deleted file mode 100644 index 70ce2da17..000000000 --- a/src/device/service/drivers/ietf_slice/Constants.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from device.service.driver_api._Driver import ( - RESOURCE_ENDPOINTS, - RESOURCE_INTERFACES, - RESOURCE_NETWORK_INSTANCES, -) - -SPECIAL_RESOURCE_MAPPINGS = { - RESOURCE_ENDPOINTS: "/endpoints", - RESOURCE_INTERFACES: "/interfaces", - RESOURCE_NETWORK_INSTANCES: "/net-instances", -} diff --git a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py index dce1a6d26..456fea2a8 100644 --- a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -13,16 +13,13 @@ # limitations under the License. 
-import anytree, json, logging, re, threading +import json, logging, re, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method -from common.type_checkers.Checkers import chk_length, chk_string, chk_type +from common.type_checkers.Checkers import chk_string, chk_type from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES -from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology -from .Constants import SPECIAL_RESOURCE_MAPPINGS from .TfsApiClient import TfsApiClient -from .Tools import compose_resource_endpoint LOGGER = logging.getLogger(__name__) @@ -34,8 +31,8 @@ ALL_RESOURCE_KEYS = [ ] -RE_IETF_SLICE_DATA = re.compile(r'^\/service\[[^\]]+\]\/IETFSlice$') -RE_IETF_SLICE_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/IETFSlice\/operation$') +RE_IETF_SLICE_DATA = re.compile(r'^\/service\[([^\]]+)\]\/IETFSlice$') + DRIVER_NAME = 'ietf_slice' METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) @@ -47,7 +44,7 @@ class IetfSliceDriver(_Driver): self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - self.__running = TreeNode('.') + username = self.settings.get('username') password = self.settings.get('password') scheme = self.settings.get('scheme', 'http') @@ -65,52 +62,6 @@ class IetfSliceDriver(_Driver): # (not supported by XR driver) self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DEVICES) - endpoints = self.settings.get("endpoints", []) - endpoint_resources = [] - for endpoint in endpoints: - endpoint_resource = compose_resource_endpoint(endpoint) - if endpoint_resource is None: - continue - endpoint_resources.append(endpoint_resource) - self._set_initial_config(endpoint_resources) - - def _set_initial_config( - self, resources: List[Tuple[str, Any]] - ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - results = [] - resolver = anytree.Resolver(pathattr="name") - with self.__lock: - for i, resource in enumerate(resources): - str_resource_name = "resources[#{:d}]".format(i) - try: - chk_type(str_resource_name, resource, (list, tuple)) - chk_length(str_resource_name, resource, min_length=2, max_length=2) - resource_key, resource_value = resource - chk_string(str_resource_name, resource_key, allow_empty=False) - resource_path = resource_key.split("/") - except Exception as e: - LOGGER.exception( - "Exception validating {:s}: {:s}".format( - str_resource_name, str(resource_key) - ) - ) - results.append(e) # if validation fails, store the exception - continue - - try: - resource_value = json.loads(resource_value) - except: # pylint: disable=bare-except - pass - - set_subnode_value( - resolver, self.__running, resource_path, resource_value - ) - - results.append(True) - return results def Connect(self) -> bool: with self.__lock: @@ -119,16 +70,19 @@ class IetfSliceDriver(_Driver): if checked: self.__started.set() return checked + def Disconnect(self) -> bool: with self.__lock: self.__terminate.set() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return [] + @metered_subclass_method(METRICS_POOL) def GetConfig( self, resource_keys : List[str] = [] @@ -137,9 +91,8 @@ 
class IetfSliceDriver(_Driver): results = [] with self.__lock: self.tac.check_credentials() - if len(resource_keys) == 0: - return dump_subtree(self.__running) - resolver = anytree.Resolver(pathattr='name') + if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS + for i, resource_key in enumerate(resource_keys): str_resource_name = 'resource_key[#{:d}]'.format(i) try: @@ -147,23 +100,40 @@ class IetfSliceDriver(_Driver): if resource_key == RESOURCE_ENDPOINTS: # return endpoints through TFS NBI API and list-devices method results.extend(self.tac.get_devices_endpoints(self.__import_topology)) - else: - resource_key = SPECIAL_RESOURCE_MAPPINGS.get( - resource_key, resource_key - ) - resource_path = resource_key.split('/') - resource_node = get_subnode( - resolver, self.__running, resource_path, default=None + elif resource_key == RESOURCE_SERVICES: + slices_data = self.tac.list_slices() + slices_list = ( + slices_data + .get('network-slice-services', dict()) + .get('slice-service', list()) ) - # if not found, resource_node is None - if resource_node is None: continue - results.extend(dump_subtree(resource_node)) + for slice_data in slices_list: + slice_name = slice_data['id'] + slice_resource_key = '/service[{:s}]/IETFSlice'.format(str(slice_name)) + results.append((slice_resource_key, slice_data)) + else: + match_slice_data = RE_IETF_SLICE_DATA.match(resource_key) + if match_slice_data is not None: + slice_name = match_slice_data.groups()[0] + slices_data = self.tac.retrieve_slice(slice_name) + slices_list = ( + slices_data + .get('network-slice-services', dict()) + .get('slice-service', list()) + ) + for slice_data in slices_list: + slice_name = slice_data['id'] + slice_resource_key = '/service[{:s}]/IETFSlice'.format(str(slice_name)) + results.append((slice_resource_key, slice_data)) + else: + results.append((resource_key, None)) except Exception as e: MSG = 'Unhandled error processing {:s}: resource_key({:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def SetConfig( self, resources : List[Tuple[str, Any]] @@ -171,46 +141,16 @@ class IetfSliceDriver(_Driver): results = [] if len(resources) == 0: return results with self.__lock: - for resource in resources: - resource_key, resource_value = resource - if RE_IETF_SLICE_OPERATION.match(resource_key): - operation_type = json.loads(resource_value)['type'] - results.append((resource_key, True)) - break - else: - raise Exception('operation type not found in resources') - for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource - if not RE_IETF_SLICE_DATA.match(resource_key): - continue - try: - resource_value = json.loads(resource_value) - slice_data = resource_value['network-slice-services'][ - 'slice-service' - ][0] - slice_name = slice_data['id'] - - if operation_type == 'create': - self.tac.create_slice(resource_value) - elif operation_type == 'update': - connection_groups = slice_data['connection-groups']['connection-group'] - if len(connection_groups) != 1: - MSG = 'Exactly one ConnectionGroup({:s}) is supported' - raise Exception(MSG.format(str(connection_groups))) - connection_group = connection_groups[0] - self.tac.update_slice( - slice_name, connection_group['id'], connection_group - ) - elif operation_type == 'delete': - self.tac.delete_slice(slice_name) - else: - MSG = 
'OperationType({:s}) not supported' - raise Exception(MSG.format(str(operation_type))) + if not RE_IETF_SLICE_DATA.match(resource_key): continue + try: + resource_value = json.loads(resource_value) + self.tac.create_slice(resource_value) results.append((resource_key, True)) except Exception as e: MSG = 'Unhandled error processing {:s}: resource_key({:s})' @@ -218,27 +158,24 @@ class IetfSliceDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig( self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: results = [] - if len(resources) == 0: - return results + if len(resources) == 0: return results with self.__lock: for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource - if not RE_IETF_SLICE_DATA.match(resource_key): - continue + if not RE_IETF_SLICE_DATA.match(resource_key): continue try: resource_value = json.loads(resource_value) - slice_name = resource_value['network-slice-services'][ - 'slice-service' - ][0]['id'] + slice_name = resource_value['network-slice-services']['slice-service'][0]['id'] self.tac.delete_slice(slice_name) results.append((resource_key, True)) except Exception as e: @@ -247,6 +184,7 @@ class IetfSliceDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState( self, subscriptions : List[Tuple[str, float, float]] @@ -254,6 +192,7 @@ class IetfSliceDriver(_Driver): # TODO: does not support monitoring by now return [False for _ in subscriptions] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState( self, subscriptions : List[Tuple[str, float, float]] @@ -261,6 +200,7 @@ class IetfSliceDriver(_Driver): # TODO: does not support monitoring by now return [False for _ in subscriptions] + def GetState( self, blocking=False, terminate : Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: diff --git a/src/device/service/drivers/ietf_slice/TfsApiClient.py b/src/device/service/drivers/ietf_slice/TfsApiClient.py index 13626ef69..3073d905f 100644 --- a/src/device/service/drivers/ietf_slice/TfsApiClient.py +++ b/src/device/service/drivers/ietf_slice/TfsApiClient.py @@ -180,6 +180,29 @@ class TfsApiClient(RestApiClient): raise Exception(MSG) from e + def list_slices(self) -> Dict: + try: + MSG = '[list_slices] GET {:s}' + LOGGER.info(MSG.format(str(IETF_SLICE_ALL_URL))) + return self.get(IETF_SLICE_ALL_URL) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send GET request to TFS IETF Slice NBI' + raise Exception(MSG) from e + + + def retrieve_slice(self, slice_name : str) -> Dict: + MSG = '[retrieve_slice] slice_name={:s}' + LOGGER.debug(MSG.format(str(slice_name))) + url = IETF_SLICE_ONE_URL.format(slice_name) + try: + MSG = '[retrieve_slice] GET {:s}' + LOGGER.info(MSG.format(str(url))) + return self.get(url) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send GET request to TFS IETF Slice NBI' + raise Exception(MSG) from e + + def update_slice( self, slice_name : str, connection_group_id : str, updated_connection_group_data : Dict diff --git a/src/device/service/drivers/ietf_slice/old/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/old/IetfSliceDriver.py new file mode 100644 index 000000000..dce1a6d26 --- /dev/null +++ b/src/device/service/drivers/ietf_slice/old/IetfSliceDriver.py @@ -0,0 +1,268 @@ +# Copyright 
2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import anytree, json, logging, re, threading +from typing import Any, Iterator, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.type_checkers.Checkers import chk_length, chk_string, chk_type +from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES +from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value +from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology +from .Constants import SPECIAL_RESOURCE_MAPPINGS +from .TfsApiClient import TfsApiClient +from .Tools import compose_resource_endpoint + + +LOGGER = logging.getLogger(__name__) + + +ALL_RESOURCE_KEYS = [ + RESOURCE_ENDPOINTS, + RESOURCE_SERVICES, +] + + +RE_IETF_SLICE_DATA = re.compile(r'^\/service\[[^\]]+\]\/IETFSlice$') +RE_IETF_SLICE_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/IETFSlice\/operation$') + +DRIVER_NAME = 'ietf_slice' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + + +class IetfSliceDriver(_Driver): + def __init__(self, address : str, port : str, **settings) -> None: + super().__init__(DRIVER_NAME, address, int(port), **settings) + self.__lock = threading.Lock() + self.__started = threading.Event() + self.__terminate = threading.Event() + self.__running = TreeNode('.') + username = self.settings.get('username') + password = self.settings.get('password') + scheme = self.settings.get('scheme', 'http') + timeout = int(self.settings.get('timeout', 60)) + self.tac = TfsApiClient( + self.address, self.port, scheme=scheme, username=username, + password=password, timeout=timeout + ) + + # Options are: + # disabled --> just import endpoints as usual + # devices --> imports sub-devices but not links connecting them. + # (a remotely-controlled transport domain might exist between them) + # topology --> imports sub-devices and links connecting them. 
+ # (not supported by XR driver) + self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DEVICES) + + endpoints = self.settings.get("endpoints", []) + endpoint_resources = [] + for endpoint in endpoints: + endpoint_resource = compose_resource_endpoint(endpoint) + if endpoint_resource is None: + continue + endpoint_resources.append(endpoint_resource) + self._set_initial_config(endpoint_resources) + + def _set_initial_config( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("resources", resources, list) + if len(resources) == 0: + return [] + results = [] + resolver = anytree.Resolver(pathattr="name") + with self.__lock: + for i, resource in enumerate(resources): + str_resource_name = "resources[#{:d}]".format(i) + try: + chk_type(str_resource_name, resource, (list, tuple)) + chk_length(str_resource_name, resource, min_length=2, max_length=2) + resource_key, resource_value = resource + chk_string(str_resource_name, resource_key, allow_empty=False) + resource_path = resource_key.split("/") + except Exception as e: + LOGGER.exception( + "Exception validating {:s}: {:s}".format( + str_resource_name, str(resource_key) + ) + ) + results.append(e) # if validation fails, store the exception + continue + + try: + resource_value = json.loads(resource_value) + except: # pylint: disable=bare-except + pass + + set_subnode_value( + resolver, self.__running, resource_path, resource_value + ) + + results.append(True) + return results + + def Connect(self) -> bool: + with self.__lock: + if self.__started.is_set(): return True + checked = self.tac.check_credentials(raise_if_fail=False) + if checked: self.__started.set() + return checked + + def Disconnect(self) -> bool: + with self.__lock: + self.__terminate.set() + return True + + @metered_subclass_method(METRICS_POOL) + def GetInitialConfig(self) -> List[Tuple[str, Any]]: + with self.__lock: + return [] + + @metered_subclass_method(METRICS_POOL) + def GetConfig( + self, resource_keys : List[str] = [] + ) -> List[Tuple[str, Union[Any, None, Exception]]]: + chk_type('resources', resource_keys, list) + results = [] + with self.__lock: + self.tac.check_credentials() + if len(resource_keys) == 0: + return dump_subtree(self.__running) + resolver = anytree.Resolver(pathattr='name') + for i, resource_key in enumerate(resource_keys): + str_resource_name = 'resource_key[#{:d}]'.format(i) + try: + chk_string(str_resource_name, resource_key, allow_empty=False) + if resource_key == RESOURCE_ENDPOINTS: + # return endpoints through TFS NBI API and list-devices method + results.extend(self.tac.get_devices_endpoints(self.__import_topology)) + else: + resource_key = SPECIAL_RESOURCE_MAPPINGS.get( + resource_key, resource_key + ) + resource_path = resource_key.split('/') + resource_node = get_subnode( + resolver, self.__running, resource_path, default=None + ) + # if not found, resource_node is None + if resource_node is None: continue + results.extend(dump_subtree(resource_node)) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) + return results + + @metered_subclass_method(METRICS_POOL) + def SetConfig( + self, resources : List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: return results + with self.__lock: + for resource in resources: + resource_key, resource_value = resource + if 
RE_IETF_SLICE_OPERATION.match(resource_key): + operation_type = json.loads(resource_value)['type'] + results.append((resource_key, True)) + break + else: + raise Exception('operation type not found in resources') + + for i, resource in enumerate(resources): + str_resource_name = 'resource_key[#{:d}]'.format(i) + LOGGER.info('resource = {:s}'.format(str(resource))) + resource_key, resource_value = resource + if not RE_IETF_SLICE_DATA.match(resource_key): + continue + try: + resource_value = json.loads(resource_value) + + slice_data = resource_value['network-slice-services'][ + 'slice-service' + ][0] + slice_name = slice_data['id'] + + if operation_type == 'create': + self.tac.create_slice(resource_value) + elif operation_type == 'update': + connection_groups = slice_data['connection-groups']['connection-group'] + if len(connection_groups) != 1: + MSG = 'Exactly one ConnectionGroup({:s}) is supported' + raise Exception(MSG.format(str(connection_groups))) + connection_group = connection_groups[0] + self.tac.update_slice( + slice_name, connection_group['id'], connection_group + ) + elif operation_type == 'delete': + self.tac.delete_slice(slice_name) + else: + MSG = 'OperationType({:s}) not supported' + raise Exception(MSG.format(str(operation_type))) + + results.append((resource_key, True)) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig( + self, resources : List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + results = [] + if len(resources) == 0: + return results + with self.__lock: + for i, resource in enumerate(resources): + str_resource_name = 'resource_key[#{:d}]'.format(i) + LOGGER.info('resource = {:s}'.format(str(resource))) + resource_key, resource_value = resource + + if not RE_IETF_SLICE_DATA.match(resource_key): + continue + + try: + resource_value = json.loads(resource_value) + slice_name = resource_value['network-slice-services'][ + 'slice-service' + ][0]['id'] + self.tac.delete_slice(slice_name) + results.append((resource_key, True)) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) + return results + + @metered_subclass_method(METRICS_POOL) + def SubscribeState( + self, subscriptions : List[Tuple[str, float, float]] + ) -> List[Union[bool, Exception]]: + # TODO: does not support monitoring by now + return [False for _ in subscriptions] + + @metered_subclass_method(METRICS_POOL) + def UnsubscribeState( + self, subscriptions : List[Tuple[str, float, float]] + ) -> List[Union[bool, Exception]]: + # TODO: does not support monitoring by now + return [False for _ in subscriptions] + + def GetState( + self, blocking=False, terminate : Optional[threading.Event] = None + ) -> Iterator[Tuple[float, str, Any]]: + # TODO: does not support monitoring by now + return [] diff --git a/src/device/service/drivers/ietf_slice/Tools.py b/src/device/service/drivers/ietf_slice/old/Tools.py similarity index 99% rename from src/device/service/drivers/ietf_slice/Tools.py rename to src/device/service/drivers/ietf_slice/old/Tools.py index 9aed37fa4..270569b2b 100644 --- a/src/device/service/drivers/ietf_slice/Tools.py +++ b/src/device/service/drivers/ietf_slice/old/Tools.py @@ -20,7 +20,7 @@ from common.proto.kpi_sample_types_pb2 import 
KpiSampleType from common.type_checkers.Checkers import chk_attribute, chk_string, chk_type from device.service.driver_api._Driver import RESOURCE_ENDPOINTS -from .Constants import SPECIAL_RESOURCE_MAPPINGS +from ..Constants import SPECIAL_RESOURCE_MAPPINGS LOGGER = logging.getLogger(__name__) -- GitLab From e6a11302bd8ed5f68ed90b31515d260b9805b53e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 08:43:32 +0000 Subject: [PATCH 144/367] Device component - NCE: - Properly implemented set/delete - Code polishing --- src/device/service/drivers/__init__.py | 2 +- .../drivers/nce/{driver.py => NCEDriver.py} | 38 +++++++------------ 2 files changed, 14 insertions(+), 26 deletions(-) rename src/device/service/drivers/nce/{driver.py => NCEDriver.py} (87%) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index ff1dd0050..bf422130a 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -109,7 +109,7 @@ DRIVERS.append( } ])) -from .nce.driver import NCEDriver # pylint: disable=wrong-import-position +from .nce.NCEDriver import NCEDriver # pylint: disable=wrong-import-position DRIVERS.append( (NCEDriver, [ { diff --git a/src/device/service/drivers/nce/driver.py b/src/device/service/drivers/nce/NCEDriver.py similarity index 87% rename from src/device/service/drivers/nce/driver.py rename to src/device/service/drivers/nce/NCEDriver.py index 009b373ca..b852aa5ef 100644 --- a/src/device/service/drivers/nce/driver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -40,8 +40,7 @@ ALL_RESOURCE_KEYS = [ LOGGER = logging.getLogger(__name__) -RE_NCE_APP_FLOW_DATA = re.compile(r'^\/service\[[^\]]+\]\/AppFlow$') -RE_NCE_APP_FLOW_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/AppFlow\/operation$') +RE_NCE_APP_FLOW_DATA = re.compile(r'^\/service\[([^\]]+)\]\/AppFlow$') DRIVER_NAME = 'nce' @@ -172,15 +171,6 @@ class NCEDriver(_Driver): results = [] if len(resources) == 0: return results with self.__lock: - for resource in resources: - resource_key, resource_value = resource - LOGGER.debug('resource = {:s}'.format(str(resource))) - if RE_NCE_APP_FLOW_OPERATION.match(resource_key): - operation_type = json.loads(resource_value)['type'] - results.append((resource_key, True)) - break - else: - raise Exception('operation type not found in resources') for resource in resources: LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource @@ -188,18 +178,11 @@ class NCEDriver(_Driver): continue try: resource_value = json.loads(resource_value) - if operation_type == 'create': - self.nce.create_app_flow(resource_value) - elif operation_type == 'delete': - app_flow_name = resource_value['huawei-nce-app-flow:app-flows']['app-flow'][ - 0 - ]['app-name'] - self.nce.delete_app_flow(app_flow_name) + self.nce.create_app_flow(resource_value) results.append((resource_key, True)) except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - 'Unhandled error processing resource_key({:s})'.format(str(resource_key)) - ) + MSG = 'Unhandled error processing SET resource_key({:s})' + LOGGER.exception(MSG.format(str(resource_key))) results.append((resource_key, e)) return results @@ -211,12 +194,17 @@ class NCEDriver(_Driver): for resource in resources: LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource + if not RE_NCE_APP_FLOW_DATA.match(resource_key): + continue try: + resource_value = json.loads(resource_value) + app_flows = 
resource_value['huawei-nce-app-flow:app-flows'] + app_flow_name = app_flows['app-flow'][0]['app-name'] + self.nce.delete_app_flow(app_flow_name) results.append((resource_key, True)) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - 'Unhandled error processing resource_key({:s})'.format(str(resource_key)) - ) + except Exception as e: + MSG = 'Unhandled error processing DELETE resource_key({:s})' + LOGGER.exception(MSG.format(str(resource_key))) results.append((resource_key, e)) return results -- GitLab From e48528d9b1fc70b271a0298a75e4b573bfa8e345 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 08:44:11 +0000 Subject: [PATCH 145/367] Service component - L3NM IETF Slice: - Bug fixes - Moved old Tools to old folder --- .../L3NM_IETFSlice_ServiceHandler.py | 2 +- .../service_handlers/l3nm_ietfslice/Tools.py | 32 ------------- .../l3nm_ietfslice/old/Tools.py | 47 +++++++++++++++++++ 3 files changed, 48 insertions(+), 33 deletions(-) create mode 100644 src/service/service/service_handlers/l3nm_ietfslice/old/Tools.py diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index 01c08188d..e597c40d2 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -155,7 +155,7 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): del controller.device_config.config_rules[:] controller.device_config.config_rules.append(ConfigRule(**json_config_rule_set( - '/service[{:s}]/IETFSlice'.format(self.__service.name), slice_data_model + '/service[{:s}]/IETFSlice'.format(slice_name), slice_data_model ))) self.__task_executor.configure_device(controller) except Exception as e: diff --git a/src/service/service/service_handlers/l3nm_ietfslice/Tools.py b/src/service/service/service_handlers/l3nm_ietfslice/Tools.py index ce5c26e7a..3e06d0b77 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/Tools.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/Tools.py @@ -13,8 +13,6 @@ # limitations under the License. 
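# --- Editor's illustrative sketch (not part of the patch series) ---------------
# The NCE driver patch above now derives the app-flow name from the resource
# value inside DeleteConfig before calling delete_app_flow(). The helper and the
# sample payload below are assumptions shaped after the keys the driver reads.
from typing import Any, Dict

def extract_app_flow_name(resource_value: Dict[str, Any]) -> str:
    # Same lookup as DeleteConfig: app-flows -> first app-flow -> app-name.
    app_flows = resource_value['huawei-nce-app-flow:app-flows']
    return app_flows['app-flow'][0]['app-name']

sample_payload = {'huawei-nce-app-flow:app-flows': {'app-flow': [{'app-name': 'flow-1'}]}}
assert extract_app_flow_name(sample_payload) == 'flow-1'
# -------------------------------------------------------------------------------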
-from dataclasses import dataclass -from typing import Dict, TypedDict from common.proto.context_pb2 import Device @@ -31,33 +29,3 @@ def get_device_endpoint_name(device_obj : Device, endpoint_uuid : str) -> str: device_name = str(device_obj.name) MSG = 'Device({:s},{:s})/Endpoint({:s}) not found' raise Exception(MSG.format(device_uuid, device_name, str(endpoint_uuid))) - - -@dataclass -class Ipv4Info: - src_prefix : str = '' - dst_prefix : str = '' - src_port : str = '' - dst_port : str = '' - vlan : str = '' - - -def extract_match_criterion_ipv4_info(match_criterion : Dict) -> Ipv4Info: - ipv4_info = Ipv4Info() - - for type_value in match_criterion['match-type']: - match_type = type_value['type'] - value = type_value['value'][0] - - if match_type == 'ietf-network-slice-service:source-ip-prefix': - ipv4_info.src_prefix = value - elif match_type == 'ietf-network-slice-service:destination-ip-prefix': - ipv4_info.dst_prefix = value - elif match_type == 'ietf-network-slice-service:source-tcp-port': - ipv4_info.src_port = value - elif match_type == 'ietf-network-slice-service:destination-tcp-port': - ipv4_info.dst_port = value - elif match_type == 'ietf-network-slice-service:vlan': - ipv4_info.vlan = value - - return ipv4_info diff --git a/src/service/service/service_handlers/l3nm_ietfslice/old/Tools.py b/src/service/service/service_handlers/l3nm_ietfslice/old/Tools.py new file mode 100644 index 000000000..7fd152cc4 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ietfslice/old/Tools.py @@ -0,0 +1,47 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
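# --- Editor's illustrative sketch (not part of the patch series) ---------------
# extract_match_criterion_ipv4_info(), relocated to old/Tools.py below, walks the
# IETF 'match-type' entries of a match-criterion. The sample input is an
# assumption shaped after the types the helper checks; the values are invented.
sample_match_criterion = {
    'match-type': [
        {'type': 'ietf-network-slice-service:source-ip-prefix',      'value': ['10.0.0.1/32']},
        {'type': 'ietf-network-slice-service:destination-ip-prefix', 'value': ['10.0.0.2/32']},
        {'type': 'ietf-network-slice-service:source-tcp-port',       'value': ['1024']},
        {'type': 'ietf-network-slice-service:destination-tcp-port',  'value': ['443']},
    ]
}
# Feeding this to the relocated helper would yield an Ipv4Info with
# src_prefix='10.0.0.1/32', dst_prefix='10.0.0.2/32', src_port='1024',
# dst_port='443' and an empty vlan field.
# -------------------------------------------------------------------------------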
+ + +from dataclasses import dataclass +from typing import Dict + + +@dataclass +class Ipv4Info: + src_prefix : str = '' + dst_prefix : str = '' + src_port : str = '' + dst_port : str = '' + vlan : str = '' + + +def extract_match_criterion_ipv4_info(match_criterion : Dict) -> Ipv4Info: + ipv4_info = Ipv4Info() + + for type_value in match_criterion['match-type']: + match_type = type_value['type'] + value = type_value['value'][0] + + if match_type == 'ietf-network-slice-service:source-ip-prefix': + ipv4_info.src_prefix = value + elif match_type == 'ietf-network-slice-service:destination-ip-prefix': + ipv4_info.dst_prefix = value + elif match_type == 'ietf-network-slice-service:source-tcp-port': + ipv4_info.src_port = value + elif match_type == 'ietf-network-slice-service:destination-tcp-port': + ipv4_info.dst_port = value + elif match_type == 'ietf-network-slice-service:vlan': + ipv4_info.vlan = value + + return ipv4_info -- GitLab From 558a9c9f6baf78f7aa7d9d8ccf38bb8a680c41f6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 08:45:04 +0000 Subject: [PATCH 146/367] Service component - L3NM NCE: - Renamed for consistency - Copied old code in old folder - Initial upgrades --- .../service/service_handlers/__init__.py | 4 +- .../l3nm_ncefan/DataStoreDelta.py | 48 ++ .../l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py | 559 ++++++++++++++++++ .../service_handlers/l3nm_ncefan/Tools.py | 31 + .../{l3nm_nce => l3nm_ncefan}/__init__.py | 0 .../old}/ConfigRules.py | 0 .../old}/L3NMNCEServiceHandler.py | 0 7 files changed, 640 insertions(+), 2 deletions(-) create mode 100644 src/service/service/service_handlers/l3nm_ncefan/DataStoreDelta.py create mode 100644 src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py create mode 100644 src/service/service/service_handlers/l3nm_ncefan/Tools.py rename src/service/service/service_handlers/{l3nm_nce => l3nm_ncefan}/__init__.py (100%) rename src/service/service/service_handlers/{l3nm_nce => l3nm_ncefan/old}/ConfigRules.py (100%) rename src/service/service/service_handlers/{l3nm_nce => l3nm_ncefan/old}/L3NMNCEServiceHandler.py (100%) diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 3ce01861f..2b06a1c34 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -22,7 +22,7 @@ from .l3nm_gnmi_openconfig.L3NMGnmiOpenConfigServiceHandler import L3NMGnmiOpenC from .l3nm_ietfactn.L3NM_IETFACTN_ServiceHandler import L3NM_IETFACTN_ServiceHandler from .l3nm_ietfl3vpn.L3NM_IETFL3VPN_ServiceHandler import L3NM_IETFL3VPN_ServiceHandler from .l3nm_ietfslice.L3NM_IETFSlice_ServiceHandler import L3NM_IETFSlice_ServiceHandler -from .l3nm_nce.L3NMNCEServiceHandler import L3NMNCEServiceHandler +from .l3nm_ncefan.L3NM_NCEFAN_ServiceHandler import L3NM_NCEFAN_ServiceHandler from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler from .p4_dummy_l1.p4_dummy_l1_service_handler import P4DummyL1ServiceHandler @@ -80,7 +80,7 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN, } ]), - (L3NMNCEServiceHandler, [ + (L3NM_NCEFAN_ServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_NCE, diff --git a/src/service/service/service_handlers/l3nm_ncefan/DataStoreDelta.py 
b/src/service/service/service_handlers/l3nm_ncefan/DataStoreDelta.py new file mode 100644 index 000000000..725de5c25 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ncefan/DataStoreDelta.py @@ -0,0 +1,48 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +from deepdiff import DeepDiff +from typing import Dict, Optional +from common.proto.context_pb2 import Service + + +RUNNING_RESOURCE_KEY = 'running_ietf_slice' +CANDIDATE_RESOURCE_KEY = 'candidate_ietf_slice' + + +class DataStoreDelta: + def __init__(self, service : Service): + self._service = service + self._service_config = service.service_config + self._candidate_data = self._get_datastore_data(CANDIDATE_RESOURCE_KEY) + self._running_data = self._get_datastore_data(RUNNING_RESOURCE_KEY ) + + def _get_datastore_data(self, resource_key : str) -> Optional[Dict]: + for cr in self._service_config.config_rules: + if cr.WhichOneof('config_rule') != 'custom': continue + if cr.custom.resource_key != resource_key: continue + resource_value = json.loads(cr.custom.resource_value) + return resource_value.get('network-slice-services', dict()).get('slice-service') + return None + + @property + def candidate_data(self): return self._candidate_data + + @property + def running_data(self): return self._running_data + + def get_diff(self) -> Dict: + return DeepDiff(self._running_data, self._candidate_data) diff --git a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py new file mode 100644 index 000000000..231ea7d93 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py @@ -0,0 +1,559 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
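# --- Editor's illustrative sketch (not part of the patch series) ---------------
# DataStoreDelta above, and the NCEFAN handler that follows, compare the running
# and candidate IETF slice documents with deepdiff. Adding an element to a list
# surfaces under 'iterable_item_added' with a "root[...]" path, which is what the
# SDP/connection-group/match-criterion regexes later match on. The toy documents
# below are assumptions; only the DeepDiff call itself comes from the patch.
from deepdiff import DeepDiff

running   = {'sdps': {'sdp': [{'id': '1'}]}}
candidate = {'sdps': {'sdp': [{'id': '1'}, {'id': '2'}]}}

diff = DeepDiff(running, candidate)
# Expected shape: {"root['sdps']['sdp'][1]": {'id': '2'}}
print(diff.get('iterable_item_added'))
# An empty diff (no changes) is falsy, which is how SetEndpoint detects creation.
# -------------------------------------------------------------------------------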
+ +import json, logging, re +from typing import Any, List, Optional, Tuple, Union, TypedDict, Dict +from uuid import uuid4 + +from deepdiff import DeepDiff + +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigRule, DeviceId, Empty, Service, ServiceConfig +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from context.client.ContextClient import ContextClient +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handler_api.Tools import ( + get_device_endpoint_uuids, +) +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +from .ConfigRules import setup_config_rules, teardown_config_rules + +RUNNING_RESOURCE_KEY = "running_ietf_slice" +CANDIDATE_RESOURCE_KEY = "candidate_ietf_slice" + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool("Service", "Handler", labels={"handler": "l3nm_nce"}) + +SDP_DIFF_RE = re.compile( + r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'sdps\'\]\[\'sdp\'\]\[(\d)\]$" +) +CONNECTION_GROUP_DIFF_RE = re.compile( + r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'connection-groups\'\]\[\'connection-group\'\]\[(\d)\]$" +) +MATCH_CRITERION_DIFF_RE = re.compile( + r"^root\[\'network-slice-services\'\]\[\'slice-service\'\]\[0\]\[\'sdps\'\]\[\'sdp\'\]\[(\d)\]\[\'service-match-criteria\'\]\[\'match-criterion\'\]\[(\d)\]$" +) + + +class Ipv4Info(TypedDict): + src_ip: str + dst_ip: str + src_port: str + dst_port: str + + +def get_removed_items( + candidate_ietf_slice_dict: dict, running_ietf_slice_dict: dict +) -> dict: + """ + For the 'iterable_item_removed' scenario, returns dict with removed sdp / connection_group / match_criterion info. + Raises an exception if there's inconsistent data or multiple items removed (which is not supported). 
+ """ + removed_items = { + "sdp": {"sdp_idx": None, "value": {}}, + "connection_group": {"connection_group_idx": None, "value": {}}, + "match_criterion": { + "sdp_idx": None, + "match_criterion_idx": None, + "value": {}, + }, + } + + running_slice_services = running_ietf_slice_dict["network-slice-services"][ + "slice-service" + ][0] + candidate_slice_services = candidate_ietf_slice_dict["network-slice-services"][ + "slice-service" + ][0] + + running_slice_sdps = [sdp["id"] for sdp in running_slice_services["sdps"]["sdp"]] + candidiate_slice_sdps = [ + sdp["id"] for sdp in candidate_slice_services["sdps"]["sdp"] + ] + removed_sdps = set(running_slice_sdps) - set(candidiate_slice_sdps) + + if len(removed_sdps) > 1: + raise Exception("Multiple SDPs removed - not supported.") + removed_sdp_id = removed_sdps.pop() + + removed_items["sdp"]["sdp_idx"] = running_slice_sdps.index(removed_sdp_id) + removed_items["sdp"]["value"] = next( + sdp + for sdp in running_slice_services["sdps"]["sdp"] + if sdp["id"] == removed_sdp_id + ) + + match_criteria = removed_items["sdp"]["value"]["service-match-criteria"][ + "match-criterion" + ] + if len(match_criteria) > 1: + raise Exception("Multiple match criteria found - not supported") + match_criterion = match_criteria[0] + connection_grp_id = match_criterion["target-connection-group-id"] + connection_groups = running_slice_services["connection-groups"]["connection-group"] + connection_group = next( + (idx, cg) + for idx, cg in enumerate(connection_groups) + if cg["id"] == connection_grp_id + ) + removed_items["connection_group"]["connection_group_idx"] = connection_group[0] + removed_items["connection_group"]["value"] = connection_group[1] + + for sdp in running_slice_services["sdps"]["sdp"]: + if sdp["id"] == removed_sdp_id: + continue + for mc in sdp["service-match-criteria"]["match-criterion"]: + if mc["target-connection-group-id"] == connection_grp_id: + removed_items["match_criterion"]["sdp_idx"] = running_slice_sdps.index( + sdp["id"] + ) + removed_items["match_criterion"]["match_criterion_idx"] = sdp[ + "service-match-criteria" + ]["match-criterion"].index(mc) + removed_items["match_criterion"]["value"] = mc + break + + if ( + removed_items["match_criterion"]["sdp_idx"] is None + or removed_items["sdp"]["sdp_idx"] is None + or removed_items["connection_group"]["connection_group_idx"] is None + ): + raise Exception("sdp, connection group or match criterion not found") + + return removed_items + + +def get_custom_config_rule( + service_config: ServiceConfig, resource_key: str +) -> Optional[ConfigRule]: + """ + Returns the ConfigRule from service_config matching the provided resource_key + if found, otherwise returns None. + """ + for cr in service_config.config_rules: + if ( + cr.WhichOneof("config_rule") == "custom" + and cr.custom.resource_key == resource_key + ): + return cr + return None + + +def get_running_candidate_ietf_slice_data_diff(service_config: ServiceConfig) -> Dict: + """ + Loads the JSON from the running/candidate resource ConfigRules and returns + their DeepDiff comparison. 
+ """ + running_cr = get_custom_config_rule(service_config, RUNNING_RESOURCE_KEY) + candidate_cr = get_custom_config_rule(service_config, CANDIDATE_RESOURCE_KEY) + + running_value_dict = json.loads(running_cr.custom.resource_value) + candidate_value_dict = json.loads(candidate_cr.custom.resource_value) + + return DeepDiff(running_value_dict, candidate_value_dict) + + +def extract_qos_info( + connection_groups: List, connection_grp_id: str, src_sdp_idx: str, dst_sdp_idx: str +) -> Dict: + """ + Extract QoS information from connection groups based on the connection group ID. + """ + qos_info = { + "upstream": {"max_delay": "0", "bw": "0", "packet_loss": "0"}, + "downstream": {"max_delay": "0", "bw": "0", "packet_loss": "0"}, + } + connection_group = next( + (cg for cg in connection_groups if cg["id"] == connection_grp_id), None + ) + + if not connection_group: + return qos_info + + for cc in connection_group["connectivity-construct"]: + if ( + cc["p2p-sender-sdp"] == src_sdp_idx + and cc["p2p-receiver-sdp"] == dst_sdp_idx + ): + direction = "upstream" + elif ( + cc["p2p-sender-sdp"] == dst_sdp_idx + and cc["p2p-receiver-sdp"] == src_sdp_idx + ): + direction = "downstream" + else: + raise Exception("invalid sender and receiver sdp ids") + for metric_bound in cc["service-slo-sle-policy"]["slo-policy"]["metric-bound"]: + if ( + metric_bound["metric-type"] + == "ietf-network-slice-service:one-way-delay-maximum" + and metric_bound["metric-unit"] == "milliseconds" + ): + qos_info[direction]["max_delay"] = metric_bound["bound"] + elif ( + metric_bound["metric-type"] + == "ietf-network-slice-service:one-way-bandwidth" + and metric_bound["metric-unit"] == "Mbps" + ): + qos_info[direction]["bw"] = metric_bound["bound"] + elif ( + metric_bound["metric-type"] + == "ietf-network-slice-service:two-way-packet-loss" + and metric_bound["metric-unit"] == "percentage" + ): + qos_info[direction]["packet_loss"] = metric_bound["percentile-value"] + + return qos_info + + +def extract_match_criterion_ipv4_info(match_criterion: Dict) -> Ipv4Info: + """ + Extracts IPv4 info from the match criterion dictionary. 
+ """ + src_ip = dst_ip = src_port = dst_port = "" + + for type_value in match_criterion["match-type"]: + m_type = type_value["type"] + val = type_value["value"][0] + if m_type == "ietf-network-slice-service:source-ip-prefix": + src_ip = val.split("/")[0] + elif m_type == "ietf-network-slice-service:destination-ip-prefix": + dst_ip = val.split("/")[0] + elif m_type == "ietf-network-slice-service:source-tcp-port": + src_port = val + elif m_type == "ietf-network-slice-service:destination-tcp-port": + dst_port = val + + return Ipv4Info( + src_ip=src_ip, + dst_ip=dst_ip, + src_port=src_port, + dst_port=dst_port, + ) + + +class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service: Service, task_executor: TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(service.service_config, **settings) + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, + endpoints: List[Tuple[str, str, Optional[str]]], + connection_uuid: Optional[str] = None, + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + results = [] + try: + context_client = ContextClient() + service_config = self.__service.service_config + settings = self.__settings_handler.get("/settings") + + src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) + src_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) + controller = self.__task_executor.get_device_controller(src_device_obj) + + list_devices = context_client.ListDevices(Empty()) + devices = list_devices.devices + device_name_map = {d.name: d for d in devices} + + running_candidate_diff = get_running_candidate_ietf_slice_data_diff( + service_config + ) + candidate_ietf_slice_cr = get_custom_config_rule( + service_config, CANDIDATE_RESOURCE_KEY + ) + candidate_resource_value_dict = json.loads( + candidate_ietf_slice_cr.custom.resource_value + ) + running_ietf_slice_cr = get_custom_config_rule( + service_config, RUNNING_RESOURCE_KEY + ) + running_resource_value_dict = json.loads( + running_ietf_slice_cr.custom.resource_value + ) + + service_name = running_resource_value_dict["network-slice-services"][ + "slice-service" + ][0]["id"] + + if not running_candidate_diff: # Slice Creation + operation_type = "create" + + slice_service = candidate_resource_value_dict["network-slice-services"][ + "slice-service" + ][0] + sdps = slice_service["sdps"]["sdp"] + connection_groups = slice_service["connection-groups"][ + "connection-group" + ] + sdp_ids = [sdp["id"] for sdp in sdps] + for sdp in sdps: + node_id = sdp["node-id"] + device_obj = device_name_map[node_id] + device_controller = self.__task_executor.get_device_controller( + device_obj + ) + if ( + device_controller is None + or controller.name != device_controller.name + ): + continue + src_sdp_idx = sdp_ids.pop(sdp_ids.index(sdp["id"])) + dst_sdp_idx = sdp_ids[0] + match_criteria = sdp["service-match-criteria"]["match-criterion"] + match_criterion = match_criteria[0] + connection_grp_id = match_criterion["target-connection-group-id"] + break + else: + raise Exception("connection group id not found") + elif "iterable_item_added" in running_candidate_diff: # new SDP added + operation_type = "create" + + slice_service = candidate_resource_value_dict["network-slice-services"][ + "slice-service" + ][0] + sdps = slice_service["sdps"]["sdp"] + 
connection_groups = slice_service["connection-groups"][ + "connection-group" + ] + added_items = { + "sdp": {"sdp_idx": None, "value": {}}, + "connection_group": {"connection_group_idx": None, "value": {}}, + "match_criterion": { + "sdp_idx": None, + "match_criterion_idx": None, + "value": {}, + }, + } + for added_key, added_value in running_candidate_diff[ + "iterable_item_added" + ].items(): + sdp_match = SDP_DIFF_RE.match(added_key) + connection_group_match = CONNECTION_GROUP_DIFF_RE.match(added_key) + match_criterion_match = MATCH_CRITERION_DIFF_RE.match(added_key) + if sdp_match: + added_items["sdp"] = { + "sdp_idx": int(sdp_match.groups()[0]), + "value": added_value, + } + elif connection_group_match: + added_items["connection_group"] = { + "connection_group_idx": int( + connection_group_match.groups()[0] + ), + "value": added_value, + } + elif match_criterion_match: + added_items["match_criterion"] = { + "sdp_idx": int(match_criterion_match.groups()[0]), + "match_criterion_idx": int( + match_criterion_match.groups()[1] + ), + "value": added_value, + } + new_sdp = sdps[added_items["sdp"]["sdp_idx"]] + src_sdp_idx = new_sdp["id"] + dst_sdp_idx = sdps[added_items["match_criterion"]["sdp_idx"]]["id"] + connection_grp_id = connection_groups[ + added_items["connection_group"]["connection_group_idx"] + ]["id"] + + if ( + connection_grp_id + != added_items["match_criterion"]["value"][ + "target-connection-group-id" + ] + ): + raise Exception( + "connection group missmatch in destination sdp and added connection group" + ) + match_criteria = new_sdp["service-match-criteria"]["match-criterion"] + match_criterion = match_criteria[0] + elif "iterable_item_removed" in running_candidate_diff: # new SDP added + operation_type = "delete" + + slice_service = running_resource_value_dict["network-slice-services"][ + "slice-service" + ][0] + sdps = slice_service["sdps"]["sdp"] + connection_groups = slice_service["connection-groups"][ + "connection-group" + ] + removed_items = get_removed_items( + candidate_resource_value_dict, running_resource_value_dict + ) + removed_sdp = sdps[removed_items["sdp"]["sdp_idx"]] + src_sdp_idx = removed_sdp["id"] + dst_sdp_idx = sdps[removed_items["match_criterion"]["sdp_idx"]]["id"] + connection_grp_id = connection_groups[ + removed_items["connection_group"]["connection_group_idx"] + ]["id"] + + if ( + connection_grp_id + != removed_items["match_criterion"]["value"][ + "target-connection-group-id" + ] + ): + raise Exception( + "connection group missmatch in destination sdp and added connection group" + ) + match_criteria = removed_sdp["service-match-criteria"][ + "match-criterion" + ] + match_criterion = match_criteria[0] + else: + raise Exception( + "transition from candidate to running info not supported" + ) + + ip_info = extract_match_criterion_ipv4_info(match_criterion) + + qos_info = extract_qos_info( + connection_groups, connection_grp_id, src_sdp_idx, dst_sdp_idx + ) + + resource_value_dict = { + "uuid": service_name, + "operation_type": operation_type, + "app_flow_id": f"{src_sdp_idx}_{dst_sdp_idx}_{service_name}", + "app_flow_user_id": str(uuid4()), + "max_latency": int(qos_info["upstream"]["max_delay"]), + "max_jitter": 10, + "max_loss": float(qos_info["upstream"]["packet_loss"]), + "upstream_assure_bw": int(qos_info["upstream"]["bw"]) * 1e6, + "upstream_max_bw": 2 * int(qos_info["upstream"]["bw"]) * 1e6, + "downstream_assure_bw": int(qos_info["downstream"]["bw"]) * 1e6, + "downstream_max_bw": 2 * int(qos_info["downstream"]["bw"]) * 1e6, + "src_ip": 
ip_info["src_ip"], + "src_port": ip_info["src_port"], + "dst_ip": ip_info["dst_ip"], + "dst_port": ip_info["dst_port"], + } + json_config_rules = setup_config_rules(service_name, resource_value_dict) + + del controller.device_config.config_rules[:] + for jcr in json_config_rules: + controller.device_config.config_rules.append(ConfigRule(**jcr)) + + self.__task_executor.configure_device(controller) + LOGGER.debug('Configured device "{:s}"'.format(controller.name)) + + except Exception as e: # pylint: disable=broad-except + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, + endpoints: List[Tuple[str, str, Optional[str]]], + connection_uuid: Optional[str] = None, + ) -> List[Union[bool, Exception]]: + chk_type("endpoints", endpoints, list) + if len(endpoints) == 0: + return [] + service_uuid = self.__service.service_id.service_uuid.uuid + results = [] + try: + src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) + src_device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(src_device_uuid)) + ) + controller = self.__task_executor.get_device_controller(src_device_obj) + json_config_rules = teardown_config_rules(service_uuid, {}) + if len(json_config_rules) > 0: + del controller.device_config.config_rules[:] + for json_config_rule in json_config_rules: + controller.device_config.config_rules.append( + ConfigRule(**json_config_rule) + ) + self.__task_executor.configure_device(controller) + results.append(True) + except Exception as e: # pylint: disable=broad-except + results.append(e) + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint( + self, constraints: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("constraints", constraints, list) + if len(constraints) == 0: + return [] + + msg = "[SetConstraint] Method not implemented. Constraints({:s}) are being ignored." + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint( + self, constraints: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("constraints", constraints, list) + if len(constraints) == 0: + return [] + + msg = "[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored." 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("resources", resources, list) + if len(resources) == 0: + return [] + + results = [] + for resource in resources: + try: + resource_value = json.loads(resource[1]) + self.__settings_handler.set(resource[0], resource_value) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception("Unable to SetConfig({:s})".format(str(resource))) + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type("resources", resources, list) + if len(resources) == 0: + return [] + + results = [] + for resource in resources: + try: + self.__settings_handler.delete(resource[0]) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception("Unable to DeleteConfig({:s})".format(str(resource))) + results.append(e) + + return results diff --git a/src/service/service/service_handlers/l3nm_ncefan/Tools.py b/src/service/service/service_handlers/l3nm_ncefan/Tools.py new file mode 100644 index 000000000..3e06d0b77 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ncefan/Tools.py @@ -0,0 +1,31 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from common.proto.context_pb2 import Device + + +def get_device_endpoint_name(device_obj : Device, endpoint_uuid : str) -> str: + ''' + Given a device object and an endpoint UUID, return the device endpoint name. + Raises an exception if not found. 
+ ''' + for d_ep in device_obj.device_endpoints: + if d_ep.endpoint_id.endpoint_uuid.uuid == endpoint_uuid: + return d_ep.name + + device_uuid = str(device_obj.device_id.device_uuid.uuid) + device_name = str(device_obj.name) + MSG = 'Device({:s},{:s})/Endpoint({:s}) not found' + raise Exception(MSG.format(device_uuid, device_name, str(endpoint_uuid))) diff --git a/src/service/service/service_handlers/l3nm_nce/__init__.py b/src/service/service/service_handlers/l3nm_ncefan/__init__.py similarity index 100% rename from src/service/service/service_handlers/l3nm_nce/__init__.py rename to src/service/service/service_handlers/l3nm_ncefan/__init__.py diff --git a/src/service/service/service_handlers/l3nm_nce/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/old/ConfigRules.py similarity index 100% rename from src/service/service/service_handlers/l3nm_nce/ConfigRules.py rename to src/service/service/service_handlers/l3nm_ncefan/old/ConfigRules.py diff --git a/src/service/service/service_handlers/l3nm_nce/L3NMNCEServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py similarity index 100% rename from src/service/service/service_handlers/l3nm_nce/L3NMNCEServiceHandler.py rename to src/service/service/service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py -- GitLab From 6be8ffdce861cc40bc1226fef3b941875586dd3f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 08:52:58 +0000 Subject: [PATCH 147/367] Service component - L3NM NCE: - Bug fix --- .../service_handlers/l3nm_ncefan/{old => }/ConfigRules.py | 0 .../service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename src/service/service/service_handlers/l3nm_ncefan/{old => }/ConfigRules.py (100%) diff --git a/src/service/service/service_handlers/l3nm_ncefan/old/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py similarity index 100% rename from src/service/service/service_handlers/l3nm_ncefan/old/ConfigRules.py rename to src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py diff --git a/src/service/service/service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py index 1317bd061..9af1dfbba 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ncefan/old/L3NMNCEServiceHandler.py @@ -32,7 +32,7 @@ from service.service.service_handler_api.Tools import ( ) from service.service.task_scheduler.TaskExecutor import TaskExecutor -from .ConfigRules import setup_config_rules, teardown_config_rules +from ..ConfigRules import setup_config_rules, teardown_config_rules RUNNING_RESOURCE_KEY = "running_ietf_slice" CANDIDATE_RESOURCE_KEY = "candidate_ietf_slice" -- GitLab From 4d733960151dfa76c1a101f7520c4074e2f5d029 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 09:34:46 +0000 Subject: [PATCH 148/367] Service component - L3NM NCE: - Removed unused Settings Handler - Added log messages --- .../l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py | 70 +++++++------------ 1 file changed, 25 insertions(+), 45 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py index 231ea7d93..cc9f1eafc 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py +++ 
b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py @@ -20,11 +20,11 @@ from deepdiff import DeepDiff from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.proto.context_pb2 import ConfigRule, DeviceId, Empty, Service, ServiceConfig +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type from context.client.ContextClient import ContextClient from service.service.service_handler_api._ServiceHandler import _ServiceHandler -from service.service.service_handler_api.SettingsHandler import SettingsHandler from service.service.service_handler_api.Tools import ( get_device_endpoint_uuids, ) @@ -253,7 +253,6 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): ) -> None: self.__service = service self.__task_executor = task_executor - self.__settings_handler = SettingsHandler(service.service_config, **settings) @metered_subclass_method(METRICS_POOL) def SetEndpoint( @@ -261,6 +260,10 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) -> List[Union[bool, Exception]]: + LOGGER.debug('[SetEndpoint] service={:s}'.format(grpc_message_to_json_string(self.__service))) + LOGGER.debug('[SetEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.debug('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + chk_type('endpoints', endpoints, list) if len(endpoints) == 0: return [] @@ -268,7 +271,6 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): try: context_client = ContextClient() service_config = self.__service.service_config - settings = self.__settings_handler.get("/settings") src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) src_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) @@ -500,60 +502,38 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): def SetConstraint( self, constraints: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("constraints", constraints, list) - if len(constraints) == 0: - return [] - - msg = "[SetConstraint] Method not implemented. Constraints({:s}) are being ignored." - LOGGER.warning(msg.format(str(constraints))) - return [True for _ in range(len(constraints))] + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + MSG = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(constraints))) + return [True for _ in constraints] @metered_subclass_method(METRICS_POOL) def DeleteConstraint( self, constraints: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("constraints", constraints, list) - if len(constraints) == 0: - return [] - - msg = "[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored." - LOGGER.warning(msg.format(str(constraints))) - return [True for _ in range(len(constraints))] + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + MSG = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
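+ # Constraints are not applied by this handler; they are logged and each entry is acknowledged with True.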
+ LOGGER.warning(MSG.format(str(constraints))) + return [True for _ in constraints] @metered_subclass_method(METRICS_POOL) def SetConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - - results = [] - for resource in resources: - try: - resource_value = json.loads(resource[1]) - self.__settings_handler.set(resource[0], resource_value) - results.append(True) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception("Unable to SetConfig({:s})".format(str(resource))) - results.append(e) - - return results + chk_type('resources', resources, list) + if len(resources) == 0: return [] + MSG = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] @metered_subclass_method(METRICS_POOL) def DeleteConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - - results = [] - for resource in resources: - try: - self.__settings_handler.delete(resource[0]) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception("Unable to DeleteConfig({:s})".format(str(resource))) - results.append(e) - - return results + chk_type('resources', resources, list) + if len(resources) == 0: return [] + MSG = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] -- GitLab From 289dca4e19e7785088fe9a1fa5f2c4de23bbd177 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 09:35:33 +0000 Subject: [PATCH 149/367] Service component - L3NM IETF Slice: - Removed unused Settings Handler - Added log messages --- .../L3NM_IETFSlice_ServiceHandler.py | 41 ++++++++----------- 1 file changed, 16 insertions(+), 25 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index e597c40d2..143eb37d2 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -13,15 +13,15 @@ # limitations under the License. 
-import ipaddress, json, logging +import ipaddress, logging from typing import Any, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.proto.context_pb2 import ConfigRule, DeviceId, Service +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type from service.service.service_handler_api._ServiceHandler import _ServiceHandler -from service.service.service_handler_api.SettingsHandler import SettingsHandler from service.service.service_handler_api.Tools import get_device_endpoint_uuids from service.service.task_scheduler.TaskExecutor import TaskExecutor from .DataStoreDelta import DataStoreDelta @@ -40,7 +40,6 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): ) -> None: self.__service = service self.__task_executor = task_executor - self.__settings_handler = SettingsHandler(service.service_config, **settings) @metered_subclass_method(METRICS_POOL) def SetEndpoint( @@ -48,6 +47,10 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) -> List[Union[bool, Exception]]: + LOGGER.debug('[SetEndpoint] service={:s}'.format(grpc_message_to_json_string(self.__service))) + LOGGER.debug('[SetEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.debug('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + chk_type('endpoints', endpoints, list) if len(endpoints) == 0: return [] @@ -197,10 +200,9 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): ) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] - MSG = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' LOGGER.warning(MSG.format(str(constraints))) - return [True for _ in range(len(constraints))] + return [True for _ in constraints] @metered_subclass_method(METRICS_POOL) def DeleteConstraint( @@ -208,37 +210,26 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): ) -> List[Union[bool, Exception]]: chk_type('constraints', constraints, list) if len(constraints) == 0: return [] - MSG = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' LOGGER.warning(MSG.format(str(constraints))) - return [True for _ in range(len(constraints))] + return [True for _ in constraints] @metered_subclass_method(METRICS_POOL) def SetConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) - results = [] - for resource in resources: - try: - resource_value = json.loads(resource[1]) - self.__settings_handler.set(resource[0], resource_value) - results.append(True) - except Exception as e: - LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource))) - results.append(e) - return results + if len(resources) == 0: return [] + MSG = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' 
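+ # No per-resource configuration is performed here; the ignored resources are only recorded in the log.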
+ LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] @metered_subclass_method(METRICS_POOL) def DeleteConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) - results = [] - for resource in resources: - try: - self.__settings_handler.delete(resource[0]) - except Exception as e: - LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource))) - results.append(e) - return results + if len(resources) == 0: return [] + MSG = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] -- GitLab From 01774cd749e5b6e08ff1a446b4be61e180a76558 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 09:36:46 +0000 Subject: [PATCH 150/367] Service component - L3NM IETF L3VPN: - Copied old code in old folder - Generalized logic to select edge SDN controlled nodes - Removed unused Settings Handler - Added log messages --- .../L3NM_IETFL3VPN_ServiceHandler.py | 78 ++- .../l3nm_ietfl3vpn/old/ConfigRules.py | 316 +++++++++++ .../old/L3NM_IETFL3VPN_ServiceHandler.py | 524 ++++++++++++++++++ 3 files changed, 872 insertions(+), 46 deletions(-) create mode 100644 src/service/service/service_handlers/l3nm_ietfl3vpn/old/ConfigRules.py create mode 100644 src/service/service/service_handlers/l3nm_ietfl3vpn/old/L3NM_IETFL3VPN_ServiceHandler.py diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py index 3153a7c05..7c6522ee2 100644 --- a/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/L3NM_IETFL3VPN_ServiceHandler.py @@ -31,7 +31,6 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type from service.service.service_handler_api._ServiceHandler import _ServiceHandler -from service.service.service_handler_api.SettingsHandler import SettingsHandler from service.service.service_handler_api.Tools import ( get_device_endpoint_uuids, get_endpoint_matching, @@ -195,13 +194,18 @@ def get_endpoint_settings(device_obj: Device, endpoint_name: str) -> dict: raise ValueError(f"Endpoint settings not found for endpoint {endpoint_name}") +PACKET_SDN_CONTROLLERS = { + DeviceTypeEnum.IP_SDN_CONTROLLER.value, + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, +} + class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service: Service, task_executor: TaskExecutor, **settings ) -> None: self.__service = service self.__task_executor = task_executor - self.__settings_handler = SettingsHandler(service.service_config, **settings) def __find_IP_transport_edge_endpoints( self, endpoints @@ -219,7 +223,7 @@ class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): DeviceId(**json_device_id(device_uuid)) ) device_controller = self.__task_executor.get_device_controller(device_obj) - if device_controller.device_type == DeviceTypeEnum.IP_SDN_CONTROLLER.value: + if device_controller.device_type in PACKET_SDN_CONTROLLERS: src_device_uuid, src_endpoint_uuid = device_uuid, endpoint_uuid src_device_controller = device_controller break @@ -233,7 +237,7 @@ class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): 
DeviceId(**json_device_id(device_uuid)) ) device_controller = self.__task_executor.get_device_controller(device_obj) - if device_controller.device_type == DeviceTypeEnum.IP_SDN_CONTROLLER.value: + if device_controller.device_type in PACKET_SDN_CONTROLLERS: dst_device_uuid, dst_endpoint_uuid = device_uuid, endpoint_uuid dst_device_controller = device_controller break @@ -321,6 +325,10 @@ class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) -> List[Union[bool, Exception]]: + LOGGER.debug('[SetEndpoint] service={:s}'.format(grpc_message_to_json_string(self.__service))) + LOGGER.debug('[SetEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.debug('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + chk_type("endpoints", endpoints, list) if len(endpoints) < 2: return [] @@ -489,60 +497,38 @@ class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): def SetConstraint( self, constraints: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("constraints", constraints, list) - if len(constraints) == 0: - return [] - - msg = "[SetConstraint] Method not implemented. Constraints({:s}) are being ignored." - LOGGER.warning(msg.format(str(constraints))) - return [True for _ in range(len(constraints))] + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + MSG = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(constraints))) + return [True for _ in constraints] @metered_subclass_method(METRICS_POOL) def DeleteConstraint( self, constraints: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("constraints", constraints, list) - if len(constraints) == 0: - return [] - - msg = "[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored." - LOGGER.warning(msg.format(str(constraints))) - return [True for _ in range(len(constraints))] + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + MSG = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(constraints))) + return [True for _ in constraints] @metered_subclass_method(METRICS_POOL) def SetConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - - results = [] - for resource in resources: - try: - resource_value = json.loads(resource[1]) - self.__settings_handler.set(resource[0], resource_value) - results.append(True) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception("Unable to SetConfig({:s})".format(str(resource))) - results.append(e) - - return results + chk_type('resources', resources, list) + if len(resources) == 0: return [] + MSG = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' 
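+ # Resources are ignored in SetConfig; this handler builds its IETF L3VPN payload in SetEndpoint instead.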
+ LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] @metered_subclass_method(METRICS_POOL) def DeleteConfig( self, resources: List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: - chk_type("resources", resources, list) - if len(resources) == 0: - return [] - - results = [] - for resource in resources: - try: - self.__settings_handler.delete(resource[0]) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception("Unable to DeleteConfig({:s})".format(str(resource))) - results.append(e) - - return results + chk_type('resources', resources, list) + if len(resources) == 0: return [] + MSG = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/old/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/old/ConfigRules.py new file mode 100644 index 000000000..c5638fc10 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/old/ConfigRules.py @@ -0,0 +1,316 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Tuple, TypedDict + +from common.proto.context_pb2 import Link +from common.tools.object_factory.ConfigRule import ( + json_config_rule_delete, + json_config_rule_set, +) +from context.client.ContextClient import ContextClient + + +class LANPrefixesDict(TypedDict): + lan: str + lan_tag: str + + +SITE_NETWORK_ACCESS_TYPE = "ietf-l3vpn-svc:multipoint" + + +def create_site_dict( + site_id: str, + site_location: str, + device_uuid: str, + endpoint_uuid: str, + service_uuid: str, + role: str, + management_type: str, + ce_address: str, + pe_address: str, + ce_pe_network_prefix: int, + mtu: int, + input_bw: int, + output_bw: int, + qos_profile_id: str, + qos_profile_direction: str, + qos_profile_latency: int, + qos_profile_bw_guarantee: int, + lan_prefixes: List[LANPrefixesDict], +) -> Dict: + """ + Helper function that creates a dictionary representing a single 'site' + entry (including management, locations, devices, routing-protocols, and + site-network-accesses). 
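+ The returned structure follows the 'site' container of the IETF L3VPN
+ Service Model (ietf-l3vpn-svc, RFC 8299).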
+ """ + site_lan_prefixes = [ + { + "lan": lp["lan"], + "lan-tag": lp["lan_tag"], + "next-hop": ce_address, + } + for lp in lan_prefixes + ] + + return { + "site-id": site_id, + "management": {"type": management_type}, + "locations": {"location": [{"location-id": site_location}]}, + "devices": { + "device": [ + { + "device-id": device_uuid, + "location": site_location, + } + ] + }, + "routing-protocols": { + "routing-protocol": [ + { + "type": "ietf-l3vpn-svc:static", + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": site_lan_prefixes + } + }, + } + ] + }, + "site-network-accesses": { + "site-network-access": [ + { + "site-network-access-id": endpoint_uuid, + "site-network-access-type": SITE_NETWORK_ACCESS_TYPE, + "device-reference": device_uuid, + "vpn-attachment": { + "vpn-id": service_uuid, + "site-role": role, + }, + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "provider-address": pe_address, + "customer-address": ce_address, + "prefix-length": ce_pe_network_prefix, + }, + } + }, + "service": { + "svc-mtu": mtu, + "svc-input-bandwidth": input_bw, + "svc-output-bandwidth": output_bw, + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "class-id": qos_profile_id, + "direction": qos_profile_direction, + "latency": { + "latency-boundary": qos_profile_latency + }, + "bandwidth": { + "guaranteed-bw-percent": qos_profile_bw_guarantee + }, + } + ] + } + } + }, + }, + } + ] + }, + } + + +def setup_config_rules( + service_uuid: str, json_settings: Dict, operation_type: str +) -> List[Dict]: + # --- Extract common or required fields for the source site --- + src_device_uuid: str = json_settings["src_device_name"] + src_endpoint_uuid: str = json_settings["src_endpoint_name"] + src_site_location: str = json_settings["src_site_location"] + src_ipv4_lan_prefixes: list[LANPrefixesDict] = json_settings.get( + "src_ipv4_lan_prefixes" + ) + src_site_id: str = json_settings.get("src_site_id", f"site_{src_site_location}") + src_management_type: str = json_settings.get( + "src_management_type", "ietf-l3vpn-svc:provider-managed" + ) + if src_management_type != "ietf-l3vpn-svc:provider-managed": + raise Exception("management type %s not supported", src_management_type) + + src_role: str = "ietf-l3vpn-svc:hub-role" + src_ce_address: str = json_settings["src_ce_address"] + src_pe_address: str = json_settings["src_pe_address"] + src_ce_pe_network_prefix: int = json_settings["src_ce_pe_network_prefix"] + src_mtu: int = json_settings["src_mtu"] + src_input_bw: int = json_settings["src_input_bw"] + src_output_bw: int = json_settings["src_output_bw"] + src_qos_profile_id = "qos-realtime" + src_qos_profile_direction = "ietf-l3vpn-svc:both" + src_qos_profile_latency: int = json_settings["src_qos_profile_latency"] + src_qos_profile_bw_guarantee: int = json_settings.get( + "src_qos_profile_bw_guarantee", 100 + ) + + # --- Extract common or required fields for the destination site --- + dst_device_uuid = json_settings["dst_device_name"] + dst_endpoint_uuid = json_settings["dst_endpoint_name"] + dst_site_location: str = json_settings["dst_site_location"] + dst_ipv4_lan_prefixes: list[LANPrefixesDict] = json_settings[ + "dst_ipv4_lan_prefixes" + ] + dst_site_id: str = json_settings.get("dst_site_id", f"site_{dst_site_location}") + dst_management_type: str = json_settings.get( + "dst_management_type", "ietf-l3vpn-svc:provider-managed" + ) + if dst_management_type != "ietf-l3vpn-svc:provider-managed": + raise Exception("management 
type %s not supported", dst_management_type) + + dst_role: str = "ietf-l3vpn-svc:spoke-role" + dst_ce_address: str = json_settings["dst_ce_address"] + dst_pe_address: str = json_settings["dst_pe_address"] + dst_ce_pe_network_prefix: int = json_settings["dst_ce_pe_network_prefix"] + dst_mtu: int = json_settings["dst_mtu"] + dst_input_bw: int = json_settings["dst_input_bw"] + dst_output_bw: int = json_settings["dst_output_bw"] + dst_qos_profile_id = "qos-realtime" + dst_qos_profile_direction = "ietf-l3vpn-svc:both" + dst_qos_profile_latency: int = json_settings["dst_qos_profile_latency"] + dst_qos_profile_bw_guarantee: int = json_settings.get( + "dst_qos_profile_bw_guarantee", 100 + ) + + # --- Build site dictionaries using the helper function --- + src_site_dict = create_site_dict( + site_id=src_site_id, + site_location=src_site_location, + device_uuid=src_device_uuid, + endpoint_uuid=src_endpoint_uuid, + service_uuid=service_uuid, + role=src_role, + management_type=src_management_type, + ce_address=src_ce_address, + pe_address=src_pe_address, + ce_pe_network_prefix=src_ce_pe_network_prefix, + mtu=src_mtu, + input_bw=src_input_bw, + output_bw=src_output_bw, + qos_profile_id=src_qos_profile_id, + qos_profile_direction=src_qos_profile_direction, + qos_profile_latency=src_qos_profile_latency, + qos_profile_bw_guarantee=src_qos_profile_bw_guarantee, + lan_prefixes=src_ipv4_lan_prefixes, + ) + + dst_site_dict = create_site_dict( + site_id=dst_site_id, + site_location=dst_site_location, + device_uuid=dst_device_uuid, + endpoint_uuid=dst_endpoint_uuid, + service_uuid=service_uuid, + role=dst_role, + management_type=dst_management_type, + ce_address=dst_ce_address, + pe_address=dst_pe_address, + ce_pe_network_prefix=dst_ce_pe_network_prefix, + mtu=dst_mtu, + input_bw=dst_input_bw, + output_bw=dst_output_bw, + qos_profile_id=dst_qos_profile_id, + qos_profile_direction=dst_qos_profile_direction, + qos_profile_latency=dst_qos_profile_latency, + qos_profile_bw_guarantee=dst_qos_profile_bw_guarantee, + lan_prefixes=dst_ipv4_lan_prefixes, + ) + + # --- Combine both sites into one structure --- + sites = { + "site": [ + src_site_dict, + dst_site_dict, + ] + } + + l3_vpn_data_model = { + "ietf-l3vpn-svc:l3vpn-svc": { + "vpn-services": {"vpn-service": [{"vpn-id": service_uuid}]}, + "sites": sites, + } + } + + json_config_rules = [ + json_config_rule_set( + "/service[{:s}]/IETFL3VPN".format(service_uuid), + l3_vpn_data_model, + ), + json_config_rule_set( + "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + {"type": operation_type}, + ), + ] + + return json_config_rules + + +def teardown_config_rules(service_uuid: str) -> List[Dict]: + json_config_rules = [ + json_config_rule_delete( + "/service[{:s}]/IETFL3VPN".format(service_uuid), + {"id": service_uuid}, + ), + json_config_rule_delete( + "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + {}, + ), + ] + return json_config_rules + + +def get_link_ep_device_names( + link: Link, context_client: ContextClient +) -> Tuple[str, str, str, str]: + ep_ids = link.link_endpoint_ids + ep_device_id_1 = ep_ids[0].device_id + ep_uuid_1 = ep_ids[0].endpoint_uuid.uuid + device_obj_1 = context_client.GetDevice(ep_device_id_1) + for d_ep in device_obj_1.device_endpoints: + if d_ep.endpoint_id.endpoint_uuid.uuid == ep_uuid_1: + ep_name_1 = d_ep.name + break + else: + raise Exception("endpoint not found") + device_obj_name_1 = device_obj_1.name + ep_device_id_2 = ep_ids[1].device_id + ep_uuid_2 = ep_ids[1].endpoint_uuid.uuid + device_obj_2 = 
context_client.GetDevice(ep_device_id_2) + for d_ep in device_obj_2.device_endpoints: + if d_ep.endpoint_id.endpoint_uuid.uuid == ep_uuid_2: + ep_name_2 = d_ep.name + break + else: + raise Exception("endpoint not found") + device_obj_name_2 = device_obj_2.name + return ( + device_obj_name_1, + ep_name_1, + device_obj_1, + device_obj_name_2, + ep_name_2, + device_obj_2, + ) diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/old/L3NM_IETFL3VPN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/old/L3NM_IETFL3VPN_ServiceHandler.py new file mode 100644 index 000000000..b54277911 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/old/L3NM_IETFL3VPN_ServiceHandler.py @@ -0,0 +1,524 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import logging +from typing import Any, List, Optional, Tuple, TypedDict, Union + +from deepdiff import DeepDiff + +from common.DeviceTypes import DeviceTypeEnum +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ( + ConfigRule, + Device, + DeviceId, + Service, + ServiceConfig, +) +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.Tools import ( + get_device_endpoint_uuids, + get_endpoint_matching, +) +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +from .ConfigRules import setup_config_rules, teardown_config_rules + +RUNNING_RESOURCE_KEY = "running_ietf_slice" +CANDIDATE_RESOURCE_KEY = "candidate_ietf_slice" +MTU = 1500 + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool("Service", "Handler", labels={"handler": "l3nm_ietf_l3vpn"}) + + +class LANPrefixesDict(TypedDict): + lan: str + lan_tag: str + + +class Ipv4Info(TypedDict): + src_lan: str + dst_lan: str + src_port: str + dst_port: str + vlan: str + + +class QoSInfo(TypedDict): + src_qos_profile_latency: int + src_input_bw: int + src_output_bw: int + dst_qos_profile_latency: int + dst_input_bw: int + dst_output_bw: int + + +def get_custom_config_rule( + service_config: ServiceConfig, resource_key: str +) -> Optional[ConfigRule]: + """ + Return the custom ConfigRule from the ServiceConfig matching the given resource_key, + or None if not found. + """ + for cr in service_config.config_rules: + if ( + cr.WhichOneof("config_rule") == "custom" + and cr.custom.resource_key == resource_key + ): + return cr + return None + + +def load_json_rule_data(service_config: ServiceConfig) -> Tuple[dict, dict]: + """ + Loads the running/candidate JSON data from the service_config for IETF slice data. + Raises an exception if either is missing. 
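+ Both rules are stored as 'custom' config rules under the
+ running_ietf_slice / candidate_ietf_slice resource keys.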
+ """ + running_cr = get_custom_config_rule(service_config, RUNNING_RESOURCE_KEY) + candidate_cr = get_custom_config_rule(service_config, CANDIDATE_RESOURCE_KEY) + + if not running_cr or not candidate_cr: + raise ValueError("Missing running/candidate IETF slice config rules.") + + running_data = json.loads(running_cr.custom.resource_value) + candidate_data = json.loads(candidate_cr.custom.resource_value) + return running_data, candidate_data + + +def extract_match_criterion_ipv4_info(match_criterion: dict) -> Ipv4Info: + """ + Extracts IPv4 match criteria data (src/dst IP, ports, VLAN) from a match_criterion dict. + """ + src_lan = dst_lan = src_port = dst_port = vlan = "" + for type_value in match_criterion["match-type"]: + value = type_value["value"][0] + if type_value["type"] == "ietf-network-slice-service:source-ip-prefix": + src_lan = value + elif type_value["type"] == "ietf-network-slice-service:destination-ip-prefix": + dst_lan = value + elif type_value["type"] == "ietf-network-slice-service:source-tcp-port": + src_port = value + elif type_value["type"] == "ietf-network-slice-service:destination-tcp-port": + dst_port = value + elif type_value["type"] == "ietf-network-slice-service:vlan": + vlan = value + + return Ipv4Info( + src_lan=src_lan, + dst_lan=dst_lan, + src_port=src_port, + dst_port=dst_port, + vlan=vlan, + ) + + +def extract_qos_info_from_connection_group( + src_sdp_id: str, dst_sdp_id: str, connectivity_constructs: list +) -> QoSInfo: + """ + Given a pair of SDP ids and a list of connectivity constructs, extract QoS info + such as latency and bandwidth (for both directions). + """ + + def _extract_qos_fields(cc: dict) -> Tuple[int, int]: + max_delay = 0 + bandwidth = 0 + metric_bounds = cc["service-slo-sle-policy"]["slo-policy"]["metric-bound"] + for metric_bound in metric_bounds: + if ( + metric_bound["metric-type"] + == "ietf-network-slice-service:one-way-delay-maximum" + and metric_bound["metric-unit"] == "milliseconds" + ): + max_delay = int(metric_bound["bound"]) + elif ( + metric_bound["metric-type"] + == "ietf-network-slice-service:one-way-bandwidth" + and metric_bound["metric-unit"] == "Mbps" + ): + # Convert from Mbps to bps + bandwidth = int(metric_bound["bound"]) * 1000000 + return max_delay, bandwidth + + src_cc = next( + cc + for cc in connectivity_constructs + if cc["p2p-sender-sdp"] == src_sdp_id and cc["p2p-receiver-sdp"] == dst_sdp_id + ) + dst_cc = next( + cc + for cc in connectivity_constructs + if cc["p2p-sender-sdp"] == dst_sdp_id and cc["p2p-receiver-sdp"] == src_sdp_id + ) + src_max_delay, src_bandwidth = _extract_qos_fields(src_cc) + dst_max_delay, dst_bandwidth = _extract_qos_fields(dst_cc) + + return QoSInfo( + src_qos_profile_latency=src_max_delay, + src_input_bw=src_bandwidth, + src_output_bw=dst_bandwidth, + dst_qos_profile_latency=dst_max_delay, + dst_input_bw=dst_bandwidth, + dst_output_bw=src_bandwidth, + ) + + +def get_endpoint_settings(device_obj: Device, endpoint_name: str) -> dict: + """ + Helper to retrieve endpoint settings from a device's config rules given an endpoint name. + Raises an exception if not found. 
+ """ + for rule in device_obj.device_config.config_rules: + if ( + rule.WhichOneof("config_rule") == "custom" + and rule.custom.resource_key == f"/endpoints/endpoint[{endpoint_name}]" + ): + return json.loads(rule.custom.resource_value) + raise ValueError(f"Endpoint settings not found for endpoint {endpoint_name}") + + +class L3NM_IETFL3VPN_ServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service: Service, task_executor: TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor + + def __find_IP_transport_edge_endpoints( + self, endpoints + ) -> Tuple[str, str, str, str, Device]: + """ + Searches for two endpoints whose device controllers are IP_SDN_CONTROLLER. + Returns (src_device_uuid, src_endpoint_uuid, dst_device_uuid, dst_endpoint_uuid, controller_device). + Raises an exception if not found or if the two IP devices differ. + """ + + # Find the first IP transport edge endpoint from the head of endpoints + for ep in endpoints: + device_uuid, endpoint_uuid = get_device_endpoint_uuids(ep) + device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(device_uuid)) + ) + device_controller = self.__task_executor.get_device_controller(device_obj) + if device_controller.device_type == DeviceTypeEnum.IP_SDN_CONTROLLER.value: + src_device_uuid, src_endpoint_uuid = device_uuid, endpoint_uuid + src_device_controller = device_controller + break + else: + raise Exception("No IP transport edge endpoints found") + + # Find the second IP transport edge endpoint from the tail of endpoints + for ep in reversed(endpoints): + device_uuid, endpoint_uuid = get_device_endpoint_uuids(ep) + device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(device_uuid)) + ) + device_controller = self.__task_executor.get_device_controller(device_obj) + if device_controller.device_type == DeviceTypeEnum.IP_SDN_CONTROLLER.value: + dst_device_uuid, dst_endpoint_uuid = device_uuid, endpoint_uuid + dst_device_controller = device_controller + break + else: + raise Exception("No IP transport edge endpoints found") + + if src_device_controller != dst_device_controller: + raise Exception("Different Src-Dst devices not supported by now") + + return ( + src_device_uuid, + src_endpoint_uuid, + dst_device_uuid, + dst_endpoint_uuid, + src_device_controller, + ) + + def __build_resource_value_dict( + self, + service_id: str, + src_device_obj: Device, + dst_device_obj: Device, + src_endpoint_name: str, + dst_endpoint_name: str, + qos_info: QoSInfo, + src_endpoint_settings: dict, + dst_endpoint_settings: dict, + src_match_criterion_ipv4_info: Ipv4Info, + dst_match_criterion_ipv4_info: Ipv4Info, + ) -> dict: + """ + Builds the final resource-value dict to be used when calling setup_config_rules(). 
+ """ + # Prepare data for source + src_device_name = src_device_obj.name + src_ce_ip = src_endpoint_settings["address_ip"] + src_ce_prefix = src_endpoint_settings["address_prefix"] + src_lan_prefixes = [ + LANPrefixesDict( + lan=src_match_criterion_ipv4_info["dst_lan"], + lan_tag=src_match_criterion_ipv4_info["vlan"], + ) + ] + + # Prepare data for destination + dst_device_name = dst_device_obj.name + dst_ce_ip = dst_endpoint_settings["address_ip"] + dst_ce_prefix = dst_endpoint_settings["address_prefix"] + dst_lan_prefixes = [ + LANPrefixesDict( + lan=dst_match_criterion_ipv4_info["dst_lan"], + lan_tag=dst_match_criterion_ipv4_info["vlan"], + ) + ] + + return { + "uuid": service_id, + "src_device_name": src_device_name, + "src_endpoint_name": src_endpoint_name, + "src_site_location": src_endpoint_settings["site_location"], + "src_ipv4_lan_prefixes": src_lan_prefixes, + "src_ce_address": src_ce_ip, + "src_pe_address": src_ce_ip, + "src_ce_pe_network_prefix": src_ce_prefix, + "src_mtu": MTU, + "src_qos_profile_latency": qos_info["src_qos_profile_latency"], + "src_input_bw": qos_info["src_input_bw"], + "src_output_bw": qos_info["src_output_bw"], + "dst_device_name": dst_device_name, + "dst_endpoint_name": dst_endpoint_name, + "dst_site_location": dst_endpoint_settings["site_location"], + "dst_ipv4_lan_prefixes": dst_lan_prefixes, + "dst_ce_address": dst_ce_ip, + "dst_pe_address": dst_ce_ip, + "dst_ce_pe_network_prefix": dst_ce_prefix, + "dst_mtu": MTU, + "dst_qos_profile_latency": qos_info["dst_qos_profile_latency"], + "dst_input_bw": qos_info["dst_input_bw"], + "dst_output_bw": qos_info["dst_output_bw"], + } + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, + endpoints: List[Tuple[str, str, Optional[str]]], + connection_uuid: Optional[str] = None, + ) -> List[Union[bool, Exception]]: + chk_type("endpoints", endpoints, list) + if len(endpoints) < 2: + return [] + + results = [] + service_config = self.__service.service_config + + try: + # Identify IP transport edge endpoints + ( + src_device_uuid, + src_endpoint_uuid, + dst_device_uuid, + dst_endpoint_uuid, + controller, + ) = self.__find_IP_transport_edge_endpoints(endpoints) + + # Retrieve device objects + src_device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(src_device_uuid)) + ) + src_endpoint_obj = get_endpoint_matching(src_device_obj, src_endpoint_uuid) + + dst_device_obj = self.__task_executor.get_device( + DeviceId(**json_device_id(dst_device_uuid)) + ) + dst_endpoint_obj = get_endpoint_matching(dst_device_obj, dst_endpoint_uuid) + + # Obtain endpoint settings + src_endpoint_settings = get_endpoint_settings( + src_device_obj, src_endpoint_obj.name + ) + dst_endpoint_settings = get_endpoint_settings( + dst_device_obj, dst_endpoint_obj.name + ) + + # Load running & candidate data, compute diff + running_data, candidate_data = load_json_rule_data(service_config) + running_candidate_diff = DeepDiff(running_data, candidate_data) + + # Determine service_id and operation_type + slice_service = candidate_data["network-slice-services"]["slice-service"][0] + service_id = slice_service["id"] + if not running_candidate_diff: + operation_type = "create" + elif "values_changed" in running_candidate_diff: + operation_type = "update" + + # Parse relevant connectivity data + sdps = slice_service["sdps"]["sdp"] + connection_group = slice_service["connection-groups"]["connection-group"][0] + connecitivity_constructs = connection_group["connectivity-construct"] + + # The code below assumes a single 
connectivity construct or + # that the relevant one is the first in the list: + connecitivity_construct = connecitivity_constructs[0] + src_sdp_idx = connecitivity_construct["p2p-sender-sdp"] + dst_sdp_idx = connecitivity_construct["p2p-receiver-sdp"] + + # QoS + qos_info = extract_qos_info_from_connection_group( + src_sdp_idx, dst_sdp_idx, connecitivity_constructs + ) + + # Retrieve match-criterion info + src_sdp = next(sdp for sdp in sdps if sdp["id"] == src_sdp_idx) + dst_sdp = next(sdp for sdp in sdps if sdp["id"] == dst_sdp_idx) + + src_match_criterion = src_sdp["service-match-criteria"]["match-criterion"][ + 0 + ] + dst_match_criterion = dst_sdp["service-match-criteria"]["match-criterion"][ + 0 + ] + src_match_criterion_ipv4_info = extract_match_criterion_ipv4_info( + src_match_criterion + ) + dst_match_criterion_ipv4_info = extract_match_criterion_ipv4_info( + dst_match_criterion + ) + + # Build resource dict & config rules + resource_value_dict = self.__build_resource_value_dict( + service_id=service_id, + src_device_obj=src_device_obj, + dst_device_obj=dst_device_obj, + src_endpoint_name=src_endpoint_obj.name, + dst_endpoint_name=dst_endpoint_obj.name, + qos_info=qos_info, + src_endpoint_settings=src_endpoint_settings, + dst_endpoint_settings=dst_endpoint_settings, + src_match_criterion_ipv4_info=src_match_criterion_ipv4_info, + dst_match_criterion_ipv4_info=dst_match_criterion_ipv4_info, + ) + json_config_rules = setup_config_rules( + service_id, resource_value_dict, operation_type + ) + + # Configure device + del controller.device_config.config_rules[:] + for jcr in json_config_rules: + controller.device_config.config_rules.append(ConfigRule(**jcr)) + self.__task_executor.configure_device(controller) + except Exception as e: # pylint: disable=broad-except + str_service_id = grpc_message_to_json_string(self.__service.service_id) + LOGGER.exception( + "Unable to SetEndpoint for Service({:s})".format(str(str_service_id)) + ) + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, + endpoints: List[Tuple[str, str, Optional[str]]], + connection_uuid: Optional[str] = None, + ) -> List[Union[bool, Exception]]: + chk_type("endpoints", endpoints, list) + if len(endpoints) < 2: + return [] + service_config = self.__service.service_config + ietf_slice_candidate_cr = get_custom_config_rule( + service_config, CANDIDATE_RESOURCE_KEY + ) + candidate_resource_value_dict = json.loads( + ietf_slice_candidate_cr.custom.resource_value + ) + service_id = candidate_resource_value_dict["network-slice-services"][ + "slice-service" + ][0]["id"] + results = [] + try: + src_device_uuid, _ = get_device_endpoint_uuids(endpoints[0]) + src_device = self.__task_executor.get_device( + DeviceId(**json_device_id(src_device_uuid)) + ) + src_controller = self.__task_executor.get_device_controller(src_device) + + dst_device_uuid, _ = get_device_endpoint_uuids(endpoints[1]) + dst_device = self.__task_executor.get_device( + DeviceId(**json_device_id(dst_device_uuid)) + ) + dst_controller = self.__task_executor.get_device_controller(dst_device) + if ( + src_controller.device_id.device_uuid.uuid + != dst_controller.device_id.device_uuid.uuid + ): + raise Exception("Different Src-Dst devices not supported by now") + controller = src_controller + json_config_rules = teardown_config_rules(service_id) + del controller.device_config.config_rules[:] + for jcr in json_config_rules: + controller.device_config.config_rules.append(ConfigRule(**jcr)) + 
self.__task_executor.configure_device(controller) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception( + "Unable to DeleteEndpoint for Service({:s})".format(str(service_id)) + ) + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint( + self, constraints: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + MSG = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(constraints))) + return [True for _ in constraints] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint( + self, constraints: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + MSG = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(constraints))) + return [True for _ in constraints] + + @metered_subclass_method(METRICS_POOL) + def SetConfig( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + MSG = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig( + self, resources: List[Tuple[str, Any]] + ) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + MSG = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(MSG.format(str(resources))) + return [True for _ in resources] -- GitLab From c33f9ae6f320cf2ee5053260d202088f51d69d3e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 11:17:14 +0000 Subject: [PATCH 151/367] Service component: - Updated (De)Configure Task to report nodes also for NCE controller --- .../task_scheduler/tasks/Task_ConnectionConfigure.py | 7 ++++++- .../task_scheduler/tasks/Task_ConnectionDeconfigure.py | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py index da27191aa..10f32e81b 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py @@ -28,6 +28,11 @@ if TYPE_CHECKING: KEY_TEMPLATE = 'connection({connection_id:s}):configure' +CONTROLLER_DEVICE_TYPES = { + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, + DeviceTypeEnum.NCE, +} + class Task_ConnectionConfigure(_Task): def __init__(self, task_executor : TaskExecutor, connection_id : ConnectionId) -> None: super().__init__(task_executor) @@ -57,7 +62,7 @@ class Task_ConnectionConfigure(_Task): errors = list() for device_type, (service_handler, connection_devices) in service_handlers.items(): - if device_type == DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: + if device_type in CONTROLLER_DEVICE_TYPES: _endpointids_to_set = endpointids_to_set else: _endpointids_to_set = [ diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py index f75c7fe84..28086cdb1 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py +++ 
b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py @@ -28,6 +28,11 @@ if TYPE_CHECKING: KEY_TEMPLATE = 'connection({connection_id:s}):deconfigure' +CONTROLLER_DEVICE_TYPES = { + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, + DeviceTypeEnum.NCE, +} + class Task_ConnectionDeconfigure(_Task): def __init__(self, task_executor : TaskExecutor, connection_id : ConnectionId) -> None: super().__init__(task_executor) @@ -57,7 +62,7 @@ class Task_ConnectionDeconfigure(_Task): errors = list() for device_type, (service_handler, connection_devices) in service_handlers.items(): - if device_type == DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: + if device_type in CONTROLLER_DEVICE_TYPES: _endpointids_to_delete = endpointids_to_delete else: _endpointids_to_delete = [ -- GitLab From f096945cef1d7382fc8a45fc0ca1526d59a4d8e4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 11:28:04 +0000 Subject: [PATCH 152/367] Device component - IETF L3VPN: - Properly implemented set/delete - Code polishing --- .../drivers/ietf_l3vpn/IetfL3VpnDriver.py | 40 +++---------------- 1 file changed, 6 insertions(+), 34 deletions(-) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 049009ec3..379655a0b 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -172,41 +172,17 @@ class IetfL3VpnDriver(_Driver): results = [] if len(resources) == 0: return results with self.__lock: - for resource in resources: - resource_key, resource_value = resource - if RE_IETF_L3VPN_OPERATION.match(resource_key): - operation_type = json.loads(resource_value)['type'] - results.append((resource_key, True)) - break - else: - raise Exception('operation type not found in resources') - for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource - if not RE_IETF_L3VPN_DATA.match(resource_key): - continue + if not RE_IETF_L3VPN_DATA.match(resource_key): continue try: resource_value = json.loads(resource_value) - - service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ - 'vpn-services' - ]['vpn-service'][0]['vpn-id'] - - if operation_type == 'create': - self.tac.create_connectivity_service(resource_value) - elif operation_type == 'update': - self.tac.update_connectivity_service(resource_value) - elif operation_type == 'delete': - self.tac.delete_connectivity_service(service_uuid) - else: - MSG = 'OperationType({:s}) not supported' - raise Exception(MSG.format(str(operation_type))) - + self.tac.create_connectivity_service(resource_value) results.append((resource_key, True)) except Exception as e: - MSG = 'Unhandled error processing {:s}: resource_key({:s})' + MSG = 'Unhandled error processing SET {:s}: resource_key({:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results @@ -216,17 +192,13 @@ class IetfL3VpnDriver(_Driver): self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: results = [] - if len(resources) == 0: - return results + if len(resources) == 0: return results with self.__lock: for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource - - if not RE_IETF_L3VPN_DATA.match(resource_key): - continue - + if not 
RE_IETF_L3VPN_DATA.match(resource_key): continue try: resource_value = json.loads(resource_value) service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ @@ -235,7 +207,7 @@ class IetfL3VpnDriver(_Driver): self.tac.delete_connectivity_service(service_uuid) results.append((resource_key, True)) except Exception as e: - MSG = 'Unhandled error processing {:s}: resource_key({:s})' + MSG = 'Unhandled error processing DELETE {:s}: resource_key({:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results -- GitLab From 428139434fac52df2a2fca4afc94e60e3bce3df4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 11:52:45 +0000 Subject: [PATCH 153/367] Device component - IETF L3VPN / IETF Slice: - Added propagation of underly endpoint settings while discovering --- src/device/service/Tools.py | 11 ++++++++- .../drivers/ietf_l3vpn/TfsApiClient.py | 23 +++++++++++++++++-- .../drivers/ietf_slice/TfsApiClient.py | 23 +++++++++++++++++-- 3 files changed, 52 insertions(+), 5 deletions(-) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index 353f3fa65..384cfc9bb 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -19,12 +19,13 @@ from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.method_wrappers.ServiceExceptions import InvalidArgumentException, NotFoundException from common.proto.context_pb2 import ( - ConfigActionEnum, ConfigRule_ACL, Device, DeviceConfig, EndPoint, Link, Location, OpticalConfig + ConfigActionEnum, ConfigRule, ConfigRule_ACL, Device, DeviceConfig, EndPoint, Link, Location, OpticalConfig ) from common.proto.device_pb2 import MonitoringSettings from common.proto.kpi_sample_types_pb2 import KpiSampleType from common.tools.grpc.ConfigRules import update_config_rule_custom from common.tools.grpc.Tools import grpc_message_to_json +from common.tools.object_factory.ConfigRule import json_config_rule_set from common.type_checkers.Checkers import chk_length, chk_type from .driver_api._Driver import _Driver, RESOURCE_ENDPOINTS from .monitoring.MonitoringLoops import MonitoringLoops @@ -233,10 +234,12 @@ def populate_endpoints( # add endpoint to current device device_endpoint = device.device_endpoints.add() device_endpoint.endpoint_id.device_id.device_uuid.uuid = device_uuid + device_config_rules = device.device_config.config_rules else: # add endpoint to specified device device_endpoint = new_sub_devices[_device_uuid].device_endpoints.add() device_endpoint.endpoint_id.device_id.device_uuid.uuid = _device_uuid + device_config_rules = new_sub_devices[_device_uuid].device_config.config_rules device_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid @@ -260,6 +263,12 @@ def populate_endpoints( if location is not None: device_endpoint.endpoint_location.MergeFrom(Location(**location)) + settings = resource_value.get('settings', None) + if settings is not None: + device_config_rules.append(ConfigRule(**json_config_rule_set( + '/endpoints/endpoint[{:s}]'.format(str(endpoint_name)), settings + ))) + if endpoint_uuid == 'mgmt' or endpoint_name == 'mgmt': if _device_uuid is None: devices_with_mgmt_endpoints.add(device_uuid) diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index baf723813..c92056285 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ 
-12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, requests +import json, logging, requests from typing import Dict, List, Optional from common.tools.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum @@ -129,15 +129,34 @@ class TfsApiClient(RestApiClient): device_data['ctrl_uuid'] = ctrl_uuid result.append((device_url, device_data)) + config_rule_list : List[Dict] = ( + json_device + .get('device_config', dict()) + .get('config_rules', list()) + ) + config_rule_dict = dict() + for cr in config_rule_list: + if cr['action'] != 'CONFIGACTION_SET': continue + if 'custom' not in cr: continue + cr_rk : str = cr['custom']['resource_key'] + if not cr_rk.startswith('/endpoints/endpoint['): continue + settings = json.loads(cr['custom']['resource_value']) + ep_name = settings['name'] + config_rule_dict[ep_name] = settings + for json_endpoint in json_device['device_endpoints']: endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + endpoint_name = json_endpoint['name'] endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) endpoint_data = { 'device_uuid': device_uuid, 'uuid': endpoint_uuid, - 'name': json_endpoint['name'], + 'name': endpoint_name, 'type': json_endpoint['endpoint_type'], } + endpoint_settings = config_rule_dict.get(endpoint_name) + if endpoint_settings is not None: + endpoint_data['settings'] = endpoint_settings result.append((endpoint_url, endpoint_data)) if import_topology == ImportTopologyEnum.DEVICES: diff --git a/src/device/service/drivers/ietf_slice/TfsApiClient.py b/src/device/service/drivers/ietf_slice/TfsApiClient.py index 3073d905f..01ea1a666 100644 --- a/src/device/service/drivers/ietf_slice/TfsApiClient.py +++ b/src/device/service/drivers/ietf_slice/TfsApiClient.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging, requests +import json, logging, requests from typing import Dict, List, Optional from common.tools.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum @@ -130,15 +130,34 @@ class TfsApiClient(RestApiClient): device_data['ctrl_uuid'] = ctrl_uuid result.append((device_url, device_data)) + config_rule_list : List[Dict] = ( + json_device + .get('device_config', dict()) + .get('config_rules', list()) + ) + config_rule_dict = dict() + for cr in config_rule_list: + if cr['action'] != 'CONFIGACTION_SET': continue + if 'custom' not in cr: continue + cr_rk : str = cr['custom']['resource_key'] + if not cr_rk.startswith('/endpoints/endpoint['): continue + settings = json.loads(cr['custom']['resource_value']) + ep_name = settings['name'] + config_rule_dict[ep_name] = settings + for json_endpoint in json_device['device_endpoints']: endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + endpoint_name = json_endpoint['name'] endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) endpoint_data = { 'device_uuid': device_uuid, 'uuid': endpoint_uuid, - 'name': json_endpoint['name'], + 'name': endpoint_name, 'type': json_endpoint['endpoint_type'], } + endpoint_settings = config_rule_dict.get(endpoint_name) + if endpoint_settings is not None: + endpoint_data['settings'] = endpoint_settings result.append((endpoint_url, endpoint_data)) if import_topology == ImportTopologyEnum.DEVICES: -- GitLab From 5ecf74569465948a9762384ec88dfd7ae5618c74 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 11:54:53 +0000 Subject: [PATCH 154/367] ECOC F5GA Telemetry Demo: - Corrected Agg/IP topology descriptors --- .../data/topology/topology-agg.json | 42 ++++++++++++-- .../data/topology/topology-ip.json | 58 +++++++++++++++++-- 2 files changed, 90 insertions(+), 10 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index 58fe5010c..1d3efb630 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -34,9 +34,24 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "200", "name": "200", "type": "copper"}, - {"uuid": "500", "name": "500", "type": "copper"}, - {"uuid": "501", "name": "501", "type": "copper"} - ]}}} + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.201.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.33.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} ]}}, {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "emu-packet-router", 
"device_drivers": ["DEVICEDRIVER_UNDEFINED"], @@ -46,9 +61,24 @@ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "200", "name": "200", "type": "copper"}, - {"uuid": "500", "name": "500", "type": "copper"}, - {"uuid": "501", "name": "501", "type": "copper"} - ]}}} + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.101.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} ]}} ], "links": [ diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json index 66f9f877c..58913deca 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json @@ -17,7 +17,22 @@ {"uuid": "200", "name": "200", "type": "copper"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} - ]}}} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "128.32.44.254", "address_prefix": "24", + "site_location": "access", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} ]} }, { @@ -30,7 +45,17 @@ {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} - ]}}} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} ]} }, { @@ -43,7 +68,17 @@ {"uuid": "lo", "name": "lo", "type": "loopback"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} - ]}}} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": 
{"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} ]} }, { @@ -57,7 +92,22 @@ {"uuid": "200", "name": "200", "type": "copper"}, {"uuid": "500", "name": "500", "type": "copper"}, {"uuid": "501", "name": "501", "type": "copper"} - ]}}} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.10.44.254", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500", "ce-ip": "172.10.44.2" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "0.0.0.0", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} ]} } ], -- GitLab From d45603d5b39a6c7aa9f4912174ccc7a7c970020d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 12:10:56 +0000 Subject: [PATCH 155/367] Device component - IETF ACTN / NCE: - Added propagation of underly endpoint settings while discovering --- .../handlers/NetworkTopologyHandler.py | 18 +++++++++++++++ .../nce/handlers/NetworkTopologyHandler.py | 22 +++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index d03f00830..1b2256e61 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -124,12 +124,30 @@ class NetworkTopologyHandler: if 'name' in tpte: tp_name = tpte['name'] + tp_ip_addr = '0.0.0.0' + if 'ietf-te-topology:te-tp-id' in tp: + tp_ip_addr = tp['ietf-te-topology:te-tp-id'] + + if node_name == 'O-PE1' and tp_name == '200': + site_location = 'access' + elif node_name == 'O-PE2' and tp_name == '200': + site_location = 'cloud' + else: + site_location = 'transport' + endpoint_url = '/endpoints/endpoint[{:s}, {:s}]'.format(node_id, tp_id) + endpoint_settings = { + 'address_ip' : tp_ip_addr, + 'address_prefix': '24', + 'mtu' : '1500', + 'site_location' : site_location, + } endpoint_data = { 'device_uuid': node_id, 'uuid': tp_id, 'name': tp_name, 'type': endpoint_type, + 'settings': endpoint_settings, } result.append((endpoint_url, endpoint_data)) diff --git a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py index d03f00830..f8643d261 100644 --- a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py @@ -124,12 +124,34 @@ class NetworkTopologyHandler: if 'name' in tpte: tp_name = tpte['name'] + tp_ip_addr = '0.0.0.0' + if 'ietf-te-topology:te-tp-id' in tp: + tp_ip_addr 
= tp['ietf-te-topology:te-tp-id'] + + if node_name.startswith('ONT') and tp_name == '200': + site_location = 'user' + elif node_name.startswith('ONT') and tp_name == '500': + site_location = 'access' + elif node_name == 'OLT' and tp_name in {'200', '201'}: + site_location = 'access' + elif node_name == 'OLT' and tp_name in {'500', '501'}: + site_location = 'transport' + else: + site_location = 'access' + endpoint_url = '/endpoints/endpoint[{:s}, {:s}]'.format(node_id, tp_id) + endpoint_settings = { + 'address_ip' : tp_ip_addr, + 'address_prefix': '24', + 'mtu' : '1500', + 'site_location' : site_location, + } endpoint_data = { 'device_uuid': node_id, 'uuid': tp_id, 'name': tp_name, 'type': endpoint_type, + 'settings': endpoint_settings, } result.append((endpoint_url, endpoint_data)) -- GitLab From 090ef5396a482d49754d0f5b8388f15c27c7fbb7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 13:23:19 +0000 Subject: [PATCH 156/367] Device component - IETF ACTN / NCE: - Added missing endpoint settings during discovery --- .../drivers/ietf_actn/handlers/NetworkTopologyHandler.py | 3 +++ .../service/drivers/nce/handlers/NetworkTopologyHandler.py | 3 +++ 2 files changed, 6 insertions(+) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index 1b2256e61..056a8e39f 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -137,6 +137,9 @@ class NetworkTopologyHandler: endpoint_url = '/endpoints/endpoint[{:s}, {:s}]'.format(node_id, tp_id) endpoint_settings = { + 'uuid' : tp_id, + 'name' : tp_name, + 'type' : endpoint_type, 'address_ip' : tp_ip_addr, 'address_prefix': '24', 'mtu' : '1500', diff --git a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py index f8643d261..124ce4024 100644 --- a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py @@ -141,6 +141,9 @@ class NetworkTopologyHandler: endpoint_url = '/endpoints/endpoint[{:s}, {:s}]'.format(node_id, tp_id) endpoint_settings = { + 'uuid' : tp_id, + 'name' : tp_name, + 'type' : endpoint_type, 'address_ip' : tp_ip_addr, 'address_prefix': '24', 'mtu' : '1500', -- GitLab From 60d48b21f1e7fab214be53a3d0f5afeef07c1cb1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 15:08:51 +0000 Subject: [PATCH 157/367] Device component - NCE Driver: - Fixed AppFlow management - Fixed Telemetry subscriptions --- src/common/tools/client/RestConfClient.py | 23 ++- src/device/service/drivers/nce/NCEDriver.py | 51 +++--- .../drivers/nce/handlers/AppFlowHandler.py | 160 ++++++++++++++++++ .../nce/handlers/SubscriptionHandler.py | 86 ++++++++++ .../service/drivers/nce/nce_fan_client.py | 143 ---------------- 5 files changed, 290 insertions(+), 173 deletions(-) create mode 100644 src/device/service/drivers/nce/handlers/AppFlowHandler.py create mode 100644 src/device/service/drivers/nce/handlers/SubscriptionHandler.py delete mode 100644 src/device/service/drivers/nce/nce_fan_client.py diff --git a/src/common/tools/client/RestConfClient.py b/src/common/tools/client/RestConfClient.py index 1cd84c218..38591fb01 100644 --- a/src/common/tools/client/RestConfClient.py +++ b/src/common/tools/client/RestConfClient.py @@ -37,7 +37,7 @@ class RestConfClient(RestApiClient): def 
_discover_base_url(self) -> None: host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) - host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) + host_meta : Dict = super().get(host_meta_url, expected_status_codes={requests.codes['OK']}) links = host_meta.get('links') if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') @@ -56,14 +56,14 @@ class RestConfClient(RestApiClient): if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') - self._base_url = str(href + '/data').replace('//', '/') + self._base_url = str(href).replace('//', '/') def get( self, endpoint : str, expected_status_codes : Set[int] = {requests.codes['OK']} ) -> Optional[Any]: return super().get( - endpoint, + ('/data/{:s}'.format(endpoint)).replace('//', '/') expected_status_codes=expected_status_codes ) @@ -72,7 +72,7 @@ class RestConfClient(RestApiClient): expected_status_codes : Set[int] = {requests.codes['CREATED']} ) -> Optional[Any]: return super().post( - endpoint, body=body, + ('/data/{:s}'.format(endpoint)).replace('//', '/'), body=body, expected_status_codes=expected_status_codes ) @@ -81,7 +81,7 @@ class RestConfClient(RestApiClient): expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} ) -> Optional[Any]: return super().put( - endpoint, body=body, + ('/data/{:s}'.format(endpoint)).replace('//', '/'), body=body, expected_status_codes=expected_status_codes ) @@ -90,7 +90,7 @@ class RestConfClient(RestApiClient): expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} ) -> Optional[Any]: return super().patch( - endpoint, body=body, + ('/data/{:s}'.format(endpoint)).replace('//', '/'), body=body, expected_status_codes=expected_status_codes ) @@ -99,6 +99,15 @@ class RestConfClient(RestApiClient): expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} ) -> Optional[Any]: return super().delete( - endpoint, body=body, + ('/data/{:s}'.format(endpoint)).replace('//', '/'), body=body, + expected_status_codes=expected_status_codes + ) + + def rpc( + self, endpoint : str, body : Optional[Any] = None, + expected_status_codes : Set[int] = {requests.codes['CREATED']} + ) -> Optional[Any]: + return super().post( + ('/operations/{:s}'.format(endpoint)).replace('//', '/'), body=body, expected_status_codes=expected_status_codes ) diff --git a/src/device/service/drivers/nce/NCEDriver.py b/src/device/service/drivers/nce/NCEDriver.py index b852aa5ef..7eff38c75 100644 --- a/src/device/service/drivers/nce/NCEDriver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -21,13 +21,12 @@ from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOU from device.service.driver_api.AnyTreeTools import ( TreeNode, dump_subtree, get_subnode, set_subnode_value, ) +from .handlers.AppFlowHandler import AppFlowHandler from .handlers.NetworkTopologyHandler import NetworkTopologyHandler -from .Constants import SPECIAL_RESOURCE_MAPPINGS -from .nce_fan_client import ( - NCEClient, - SubscribedNotificationsSchema, - UnsubscribedNotificationsSchema, +from .handlers.SubscriptionHandler import ( + SubscribedNotificationsSchema, SubscriptionHandler, UnsubscribedNotificationsSchema ) +from .Constants import SPECIAL_RESOURCE_MAPPINGS from .Tools import compose_resource_endpoint @@ -60,14 +59,11 @@ class NCEDriver(_Driver): restconf_settings['logger'] = 
logging.getLogger(__name__ + '.RestConfClient') self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_client, **settings) + self._handler_app_flow = AppFlowHandler(self._rest_conf_client) + self._handler_subscription = SubscriptionHandler(self._rest_conf_client) self.__running = TreeNode('.') - scheme = self.settings.get('scheme', 'http') - username = self.settings.get('username') - password = self.settings.get('password') - self.nce = NCEClient( - self.address, self.port, scheme=scheme, username=username, password=password, - ) + endpoints = self.settings.get('endpoints', []) endpoint_resources = [] for endpoint in endpoints: @@ -92,7 +88,7 @@ class NCEDriver(_Driver): resource_key, resource_value = resource chk_string(str_resource_name, resource_key, allow_empty=False) resource_path = resource_key.split('/') - except Exception as e: # pylint: disable=broad-except + except Exception as e: LOGGER.exception( 'Exception validating {:s}: {:s}'.format( str_resource_name, str(resource_key) @@ -119,7 +115,7 @@ class NCEDriver(_Driver): except requests.exceptions.Timeout: LOGGER.exception('Timeout exception checking connectivity') return False - except Exception: # pylint: disable=broad-except + except Exception: LOGGER.exception('Unhandled exception checking connectivity') return False else: @@ -151,6 +147,17 @@ class NCEDriver(_Driver): chk_string(str_resource_name, resource_key, allow_empty=False) if resource_key == RESOURCE_ENDPOINTS: results.extend(self._handler_net_topology.get()) + elif resource_key == RESOURCE_SERVICES: + app_flows = self._handler_app_flow.retrieve() + app_flow_names = [ + app_flow['name'] + for app_flow in app_flows['huawei-nce-app-flow:app-flows']['app-flow'] + ] + if len(app_flow_names) == 1: + resource_key = '/service[{:s}]/AppFlow'.format(app_flow_names[0]) + results.append((resource_key, app_flows)) + elif len(app_flow_names) > 1: + raise Exception('Support for multiple app-flow retrieval not properly managed') else: resource_key = SPECIAL_RESOURCE_MAPPINGS.get(resource_key, resource_key) resource_path = resource_key.split('/') @@ -158,7 +165,7 @@ class NCEDriver(_Driver): # if not found, resource_node is None if resource_node is None: continue results.extend(dump_subtree(resource_node)) - except Exception as e: # pylint: disable=broad-except + except Exception as e: MSG = 'Error processing resource_key({:s}, {:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) # if processing fails, store the exception @@ -178,7 +185,7 @@ class NCEDriver(_Driver): continue try: resource_value = json.loads(resource_value) - self.nce.create_app_flow(resource_value) + self._handler_app_flow.create(resource_value) results.append((resource_key, True)) except Exception as e: # pylint: disable=broad-except MSG = 'Unhandled error processing SET resource_key({:s})' @@ -198,9 +205,7 @@ class NCEDriver(_Driver): continue try: resource_value = json.loads(resource_value) - app_flows = resource_value['huawei-nce-app-flow:app-flows'] - app_flow_name = app_flows['app-flow'][0]['app-name'] - self.nce.delete_app_flow(app_flow_name) + self._handler_app_flow.delete(resource_value) results.append((resource_key, True)) except Exception as e: MSG = 'Unhandled error processing DELETE resource_key({:s})' @@ -216,16 +221,16 @@ class NCEDriver(_Driver): raise ValueError('NCE driver supports only one subscription at a time') s = subscriptions[0] uri = s[0] - _ = 
s[1] # sampling duration + #sampling_duration = s[1] sampling_interval = s[2] - s_data: SubscribedNotificationsSchema = { + s_data : SubscribedNotificationsSchema = { 'ietf-subscribed-notifications:input': { 'datastore': 'operational', 'ietf-yang-push:datastore-xpath-filter': uri, 'ietf-yang-push:periodic': {'ietf-yang-push:period': str(sampling_interval)}, } } - s_id = self.nce.subscribe_telemetry(s_data) + s_id = self._handler_subscription.subscribe(s_data) return [s_id] @metered_subclass_method(METRICS_POOL) @@ -236,12 +241,12 @@ class NCEDriver(_Driver): raise ValueError('NCE driver supports only one subscription at a time') s = subscriptions[0] identifier = s[0] - s_data: UnsubscribedNotificationsSchema = { + s_data : UnsubscribedNotificationsSchema = { 'delete-subscription': { 'identifier': identifier, } } - self.nce.unsubscribe_telemetry(s_data) + self._handler_subscription.unsubscribe(s_data) return [True] def GetState( diff --git a/src/device/service/drivers/nce/handlers/AppFlowHandler.py b/src/device/service/drivers/nce/handlers/AppFlowHandler.py new file mode 100644 index 000000000..1de9bd368 --- /dev/null +++ b/src/device/service/drivers/nce/handlers/AppFlowHandler.py @@ -0,0 +1,160 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
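# ----------------------------------------------------------------------------------------------
# Editor's note (not part of the committed file): a minimal usage sketch of the AppFlowHandler
# introduced below, mirroring how NCEDriver wires and calls it elsewhere in this same patch.
# The address, port, scheme and the name `app_flow_descriptor` are illustrative assumptions;
# the descriptor is a dict in the 'huawei-nce-app-flow:app-flows' shape parsed by create()/delete().
#
#   client  = RestConfClient('nce-fan.example.net', port=443, scheme='https')   # hypothetical endpoint
#   handler = AppFlowHandler(client)
#   handler.create(app_flow_descriptor)     # POSTs qos-profiles, applications, then app-flows
#   current = handler.retrieve()            # GETs the three lists and re-assembles them under
#                                           # 'huawei-nce-app-flow:app-flows'
#   handler.delete(app_flow_descriptor)     # DELETEs app-flows, applications, qos-profiles
#                                           # (reverse order of creation)
# ----------------------------------------------------------------------------------------------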
+ +import logging, requests +from typing import Dict +from common.tools.client.RestConfClient import RestConfClient + + +LOGGER = logging.getLogger(__name__) + + +class AppFlowHandler: + def __init__(self, rest_conf_client : RestConfClient) -> None: + self._rest_conf_client = rest_conf_client + + self._url_qos_profile = '/huawei-nce-app-flow:qos-profiles' + self._url_qos_profile_item = self._url_qos_profile + '/qos-profile={:s}' + + self._url_application = '/huawei-nce-app-flow:applications' + self._url_application_item = self._url_application + '/application={:s}' + + self._url_app_flow = '/huawei-nce-app-flow:app-flows' + self._url_app_flow_item = self._url_app_flow + '/app-flow={:s}' + + + def create(self, data : Dict) -> None: + MSG = '[create] data={:s}' + LOGGER.debug(MSG.format(str(data))) + + try: + qos_profiles = ( + data + .get('huawei-nce-app-flow:app-flows', dict()) + .get('qos-profiles', dict()) + .get('qos-profile', list()) + ) + for qos_profile in qos_profiles: + request = {'huawei-nce-app-flow:qos-profiles': {'qos-profile': qos_profile}} + LOGGER.info('Creating QoS Profile: {:s}'.format(str(request))) + self._rest_conf_client.post(self._url_qos_profile, json=request) + + applications = ( + data + .get('huawei-nce-app-flow:app-flows', dict()) + .get('applications', dict()) + .get('application', list()) + ) + for application in applications: + request = {'huawei-nce-app-flow:applications': {'application': application}} + LOGGER.info('Creating Application: {:s}'.format(str(request))) + self._rest_conf_client.post(self._url_application, json=request) + + app_flows = ( + data + .get('huawei-nce-app-flow:app-flows', dict()) + .get('app-flow', list()) + ) + for app_flow in app_flows: + request = {'huawei-nce-app-flow:app-flows': {'app-flow': app_flow}} + LOGGER.info('Creating App Flow: {:s}'.format(str(request))) + self._rest_conf_client.post(self._url_app_flow, json=request) + + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send POST requests to NCE FAN NBI' + raise Exception(MSG) from e + + + def retrieve(self) -> Dict: + try: + LOGGER.info('Retrieving QoS Profiles') + qos_profiles = self._rest_conf_client.get(self._url_qos_profile) + + LOGGER.info('Retrieving Applications') + applications = self._rest_conf_client.get(self._url_application) + + LOGGER.info('Retrieving App Flows') + app_flows = self._rest_conf_client.get(self._url_app_flow) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send GET requests to NCE FAN NBI' + raise Exception(MSG) from e + + qos_profiles = ( + qos_profiles + .get('huawei-nce-app-flow:qos-profiles', dict()) + .get('qos-profile', list()) + ) + + applications = ( + applications + .get('huawei-nce-app-flow:applications', dict()) + .get('application', list()) + ) + + app_flows = ( + app_flows + .get('huawei-nce-app-flow:app-flows', dict()) + .get('app-flow', list()) + ) + + return {'huawei-nce-app-flow:app-flows': { + 'qos-profiles': {'qos-profile': qos_profiles}, + 'applications': {'application': applications}, + 'app-flow': app_flows, + }} + + + def delete(self, data : Dict) -> None: + MSG = '[delete] data={:s}' + LOGGER.debug(MSG.format(str(data))) + + try: + app_flows = ( + data + .get('huawei-nce-app-flow:app-flows', dict()) + .get('app-flow', list()) + ) + for app_flow in app_flows: + app_flow_name = app_flow['name'] + LOGGER.info('Deleting App Flow: {:s}'.format(str(app_flow_name))) + app_flow_url = self._url_app_flow_item.format(app_flow_name) + self._rest_conf_client.delete(app_flow_url) + + 
applications = ( + data + .get('huawei-nce-app-flow:app-flows', dict()) + .get('applications', dict()) + .get('application', list()) + ) + for application in applications: + application_name = application['name'] + LOGGER.info('Deleting Application: {:s}'.format(str(application_name))) + application_url = self._url_application_item.format(application_name) + self._rest_conf_client.delete(application_url) + + qos_profiles = ( + data + .get('huawei-nce-app-flow:app-flows', dict()) + .get('qos-profiles', dict()) + .get('qos-profile', list()) + ) + for qos_profile in qos_profiles: + qos_profile_name = qos_profile['name'] + LOGGER.info('Deleting QoS Profile: {:s}'.format(str(qos_profile_name))) + qos_profile_url = self._url_qos_profile_item.format(qos_profile_name) + self._rest_conf_client.delete(qos_profile_url) + + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send POST requests to NCE FAN NBI' + raise Exception(MSG) from e diff --git a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py new file mode 100644 index 000000000..00c13d637 --- /dev/null +++ b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py @@ -0,0 +1,86 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
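# ----------------------------------------------------------------------------------------------
# Editor's note (not part of the committed file): a sketch of how NCEDriver (above) drives the
# SubscriptionHandler defined below, assuming the reply matches the SubscriptionId TypedDict.
# The XPath filter and period values are illustrative only.
#
#   handler = SubscriptionHandler(rest_conf_client)
#   reply = handler.subscribe({'ietf-subscribed-notifications:input': {
#       'datastore': 'operational',
#       'ietf-yang-push:datastore-xpath-filter': '/ietf-interfaces:interfaces',   # example filter
#       'ietf-yang-push:periodic': {'ietf-yang-push:period': '10'},
#   }})   # sent as an RPC to /operations/subscriptions:establish-subscription
#   handler.unsubscribe({'delete-subscription': {'identifier': reply['identifier']}})
#         # sent as an RPC to /operations/subscriptions:delete-subscription
# ----------------------------------------------------------------------------------------------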
+ +import logging, requests +from typing_extensions import TypedDict +from common.tools.client.RestConfClient import RestConfClient + + +LOGGER = logging.getLogger(__name__) + + +Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) + +Input = TypedDict( + 'Input', + { + 'datastore': str, + 'ietf-yang-push:datastore-xpath-filter': str, + 'ietf-yang-push:periodic': Periodic, + }, +) + +SubscribedNotificationsSchema = TypedDict( + 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} +) + +SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) + +UnsubscribedNotificationsSchema = TypedDict( + 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} +) + + +class SubscriptionId(TypedDict): + identifier: str + uri: str + + +class SubscriptionHandler: + def __init__(self, rest_conf_client : RestConfClient) -> None: + self._rest_conf_client = rest_conf_client + + self._url_qos_profile = '/huawei-nce-app-flow:qos-profiles' + self._url_qos_profile_item = self._url_qos_profile + '/qos-profile={:s}' + + self._url_app_flow = '/huawei-nce-app-flow:app-flows' + self._url_app_flow_item = self._url_app_flow + '/app-flow={:s}' + + + def subscribe( + self, subscription_data : SubscribedNotificationsSchema + ) -> SubscriptionId: + MSG = '[subscribe] subscription_data={:s}' + LOGGER.debug(MSG.format(str(subscription_data))) + try: + url = '/subscriptions:establish-subscription' + LOGGER.debug('Subscribing to telemetry: {:s}'.format(str(subscription_data))) + return self._rest_conf_client.rpc(url, json=subscription_data) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send RPC request' + raise Exception(MSG) from e + + + def unsubscribe( + self, unsubscription_data : UnsubscribedNotificationsSchema + ) -> SubscriptionId: + MSG = '[unsubscribe] unsubscription_data={:s}' + LOGGER.debug(MSG.format(str(unsubscription_data))) + try: + url = '/subscriptions:delete-subscription' + LOGGER.debug('Unsubscribing from telemetry: {:s}'.format(str(unsubscription_data))) + return self._rest_conf_client.rpc(url, json=unsubscription_data) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send RPC request' + raise Exception(MSG) from e diff --git a/src/device/service/drivers/nce/nce_fan_client.py b/src/device/service/drivers/nce/nce_fan_client.py deleted file mode 100644 index eac69e2b1..000000000 --- a/src/device/service/drivers/nce/nce_fan_client.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging -from typing import Optional -from typing_extensions import List, TypedDict - -import requests -from requests.auth import HTTPBasicAuth - - -Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) - -Input = TypedDict( - 'Input', - { - 'datastore': str, - 'ietf-yang-push:datastore-xpath-filter': str, - 'ietf-yang-push:periodic': Periodic, - }, -) - -SubscribedNotificationsSchema = TypedDict( - 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} -) - -SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) - -UnsubscribedNotificationsSchema = TypedDict( - 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} -) - - -class SubscriptionId(TypedDict): - identifier: str - uri: str - - -LOGGER = logging.getLogger(__name__) - -NCE_FAN_URL = '{:s}://{:s}:{:d}' -TIMEOUT = 30 - -HTTP_OK_CODES = { - 200, # OK - 201, # Created - 202, # Accepted - 204, # No Content -} - -MAPPING_STATUS = { - 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, - 'DEVICEOPERATIONALSTATUS_DISABLED': 1, - 'DEVICEOPERATIONALSTATUS_ENABLED': 2, -} - -MAPPING_DRIVER = { - 'DEVICEDRIVER_UNDEFINED': 0, - 'DEVICEDRIVER_OPENCONFIG': 1, - 'DEVICEDRIVER_TRANSPORT_API': 2, - 'DEVICEDRIVER_P4': 3, - 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, - 'DEVICEDRIVER_ONF_TR_532': 5, - 'DEVICEDRIVER_XR': 6, - 'DEVICEDRIVER_IETF_L2VPN': 7, - 'DEVICEDRIVER_GNMI_OPENCONFIG': 8, - 'DEVICEDRIVER_OPTICAL_TFS': 9, - 'DEVICEDRIVER_IETF_ACTN': 10, - 'DEVICEDRIVER_OC': 11, -} - -HEADERS = {'Content-Type': 'application/json'} - - -class NCEClient: - def __init__( - self, - address: str, - port: str, - scheme: str = 'http', - username: Optional[str] = None, - password: Optional[str] = None, - ) -> None: - self._nce_fan_url = NCE_FAN_URL.format(scheme, address, int(port)) - self._auth = None - - def create_app_flow(self, app_flow_data: dict) -> None: - try: - app_data = app_flow_data['huawei-nce-app-flow:app-flows']['applications'] - app_url = self._nce_fan_url + '/restconf/v1/data' + '/app-flows/apps' - LOGGER.info(f'Creating app: {app_data} URL: {app_url}') - requests.post(app_url, json=app_data, headers=HEADERS) - - app_flow_data = {'app-flow': app_flow_data['huawei-nce-app-flow:app-flows']['app-flow']} - app_flow_url = self._nce_fan_url + '/restconf/v1/data' + '/app-flows' - LOGGER.info(f'Creating app flow: {app_flow_data} URL: {app_flow_url}') - requests.post(app_flow_url, json=app_flow_data, headers=HEADERS) - except requests.exceptions.ConnectionError: - raise Exception('faild to send post requests to NCE FAN') - - def delete_app_flow(self, app_flow_name: str) -> None: - try: - app_url = ( - self._nce_fan_url - + '/restconf/v1/data' - + f'/app-flows/apps/application={app_flow_name}' - ) - LOGGER.info(f'Deleting app: {app_flow_name} URL: {app_url}') - requests.delete(app_url) - - app_flow_url = ( - self._nce_fan_url + '/restconf/v1/data' + f'/app-flows/app-flow={app_flow_name}' - ) - LOGGER.info(f'Deleting app flow: {app_flow_name} URL: {app_flow_url}') - requests.delete(app_flow_url) - except requests.exceptions.ConnectionError: - raise Exception('faild to send delete request to NCE FAN') - - def subscribe_telemetry( - self, subscription_data: SubscribedNotificationsSchema - ) -> SubscriptionId: - url = self._nce_fan_url + '/restconf/operations/subscriptions:establish-subscription' - LOGGER.debug(f'Subscribing to telemetry with data: {subscription_data} URL: {url}') - r = requests.post(url, json=subscription_data, headers=HEADERS) - r.raise_for_status() - return 
r.json() - - def unsubscribe_telemetry(self, unsubscription_data: UnsubscribedNotificationsSchema) -> None: - url = self._nce_fan_url + '/restconf/operations/subscriptions:delete-subscription' - LOGGER.debug(f'Unsubscribing to telemetry with data: {unsubscription_data} URL: {url}') - r = requests.post(url, json=unsubscription_data, headers=HEADERS) - r.raise_for_status() -- GitLab From 4e71c1d57db31adae20db83b39cfd12242b1a09d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 15:22:20 +0000 Subject: [PATCH 158/367] Common - Tools: - Minor bug fix in RestConfClient --- src/common/tools/client/RestConfClient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/common/tools/client/RestConfClient.py b/src/common/tools/client/RestConfClient.py index 38591fb01..35442bdcf 100644 --- a/src/common/tools/client/RestConfClient.py +++ b/src/common/tools/client/RestConfClient.py @@ -63,7 +63,7 @@ class RestConfClient(RestApiClient): expected_status_codes : Set[int] = {requests.codes['OK']} ) -> Optional[Any]: return super().get( - ('/data/{:s}'.format(endpoint)).replace('//', '/') + ('/data/{:s}'.format(endpoint)).replace('//', '/'), expected_status_codes=expected_status_codes ) -- GitLab From 1a8948f4719035186a4aac2ff03447af17f5bbdb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 15:22:40 +0000 Subject: [PATCH 159/367] SIMAP Connector: - Code Cleanup --- .../service/simap_updater/SimapUpdater.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index e130267dd..70fa1a935 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -510,15 +510,6 @@ class SimapUpdater: self._event_collector.install_collector( self._context_client.GetAllEvents, Empty(), log_events_received=True ) - #self._event_collector.install_collector( - # self._context_client.GetTopologyEvents, Empty(), log_events_received=True - #) - #self._event_collector.install_collector( - # self._context_client.GetDeviceEvents, Empty(), log_events_received=True - #) - #self._event_collector.install_collector( - # self._context_client.GetLinkEvents, Empty(), log_events_received=True - #) self._event_dispatcher = EventDispatcher( self._event_collector.get_events_queue(), self._context_client, -- GitLab From b3ca7cbb9fff58085c0b7377e5cf379df2f64596 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 15:23:10 +0000 Subject: [PATCH 160/367] ECOC F5GA Telemetry Demo: - Added subscription scripts - Added SIMAP to dump-logs --- .../data/telemetry/subscription-slice1.json | 9 ++++++ .../data/telemetry/subscription-slice2.json | 9 ++++++ src/tests/ecoc25-f5ga-telemetry/dump-logs.sh | 1 + .../subscribe-telemetry-slice1.sh | 28 +++++++++++++++++++ .../subscribe-telemetry-slice2.sh | 28 +++++++++++++++++++ 5 files changed, 75 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json create mode 100755 src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json new file mode 100644 index 000000000..0a73f0b67 --- /dev/null 
+++ b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=${simapName}/ietf-network-topology:link=link-1/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json new file mode 100644 index 000000000..0a73f0b67 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=${simapName}/ietf-network-topology:link=link-1/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh index 5fea21189..0c2f101ae 100755 --- a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh +++ b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh @@ -18,4 +18,5 @@ ./scripts/show_logs_service.sh > service.log ./scripts/show_logs_slice.sh > slice.log ./scripts/show_logs_pathcomp_frontend.sh > pathcomp.log +./scripts/show_logs_simap_connector.sh > simap.log ./scripts/show_logs_nbi.sh > nbi.log diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh new file mode 100755 index 000000000..34de3b3bf --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Subscribe Telemetry slice1..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/telemetry/subscription-slice1.json \ + http://0.0.0.0:80/restconf/operations/subscriptions:establish-subscription +echo + + +echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh new file mode 100755 index 000000000..1bfa62322 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Subscribe Telemetry slice2..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/telemetry/subscription-slice2.json \ + http://0.0.0.0:80/restconf/operations/subscriptions:establish-subscription +echo + + +echo "Done!" -- GitLab From 7c7ccd6ce57e7d10a11cd4b5e0a7a5c081167889 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 16:48:13 +0000 Subject: [PATCH 161/367] Common - Tools - Context Queries: - Added extra helper methods --- src/common/tools/context_queries/Device.py | 4 ++++ src/common/tools/context_queries/Link.py | 4 ++++ src/common/tools/context_queries/Service.py | 12 ++++++++++-- src/common/tools/context_queries/Topology.py | 8 ++++++++ 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/src/common/tools/context_queries/Device.py b/src/common/tools/context_queries/Device.py index b972fd511..d11bdb1be 100644 --- a/src/common/tools/context_queries/Device.py +++ b/src/common/tools/context_queries/Device.py @@ -50,6 +50,10 @@ def get_existing_device_uuids(context_client : ContextClient) -> Set[str]: existing_device_uuids = {device_id.device_uuid.uuid for device_id in existing_device_ids.device_ids} return existing_device_uuids +def get_devices(context_client : ContextClient) -> List[Device]: + devices = context_client.ListDevices(Empty()) + return [d for d in devices.devices] + def add_device_to_topology( context_client : ContextClient, context_id : ContextId, topology_uuid : str, device_uuid : str ) -> bool: diff --git a/src/common/tools/context_queries/Link.py b/src/common/tools/context_queries/Link.py index 5a96d9654..144ec7534 100644 --- a/src/common/tools/context_queries/Link.py +++ b/src/common/tools/context_queries/Link.py @@ -37,6 +37,10 @@ def get_existing_link_uuids(context_client : ContextClient) -> Set[str]: existing_link_uuids = {link_id.link_uuid.uuid for link_id in existing_link_ids.link_ids} return existing_link_uuids +def get_links(context_client : ContextClient) -> List[Link]: + links = context_client.ListLinks(Empty()) + return [l for l in links.links] + def add_link_to_topology( context_client : ContextClient, context_id : ContextId, topology_uuid : str, link_uuid : str ) -> bool: diff --git a/src/common/tools/context_queries/Service.py b/src/common/tools/context_queries/Service.py index da017412f..8b7ca7312 100644 --- a/src/common/tools/context_queries/Service.py +++ b/src/common/tools/context_queries/Service.py @@ -13,9 +13,9 @@ # limitations under the License. 
import grpc, logging -from typing import Optional +from typing import List, Optional from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import Service, ServiceFilter, ServiceId +from common.proto.context_pb2 import ContextId, Service, ServiceFilter, ServiceId from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) @@ -55,3 +55,11 @@ def get_service_by_uuid( return get_service_by_id( context_client, service_id, rw_copy=rw_copy, include_endpoint_ids=include_endpoint_ids, include_constraints=include_constraints, include_config_rules=include_config_rules) + +def get_services( + context_client : ContextClient, context_uuid : str = DEFAULT_CONTEXT_NAME + ) -> List[Service]: + context_id = ContextId() + context_id.context_uuid.uuid = context_uuid + services = context_client.ListServices(context_id) + return [s for s in services.services] diff --git a/src/common/tools/context_queries/Topology.py b/src/common/tools/context_queries/Topology.py index 5df396feb..29e4b8051 100644 --- a/src/common/tools/context_queries/Topology.py +++ b/src/common/tools/context_queries/Topology.py @@ -62,6 +62,14 @@ def get_topology( #LOGGER.exception('Unable to get topology({:s} / {:s})'.format(str(context_uuid), str(topology_uuid))) return None +def get_topologies( + context_client : ContextClient, context_uuid : str = DEFAULT_CONTEXT_NAME + ) -> List[Topology]: + context_id = ContextId() + context_id.context_uuid.uuid = context_uuid + topologies = context_client.ListTopologies(context_id) + return [t for t in topologies.topologies] + def get_topology_details( context_client : ContextClient, topology_uuid : str, context_uuid : str = DEFAULT_CONTEXT_NAME, rw_copy : bool = False -- GitLab From baa4e6cfbeb972854ae31e255b0208ac22f24419 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 16:49:17 +0000 Subject: [PATCH 162/367] SIMAP Connector: - Added support for Services in ObjectCache - Added support for list-all in ObjectCache - Added handling of service events in SimapUpdater --- .../service/simap_updater/ObjectCache.py | 95 +++++++-- .../service/simap_updater/SimapUpdater.py | 181 +++++++++++++++++- .../service/simap_updater/Tools.py | 38 +++- 3 files changed, 296 insertions(+), 18 deletions(-) diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index 4b5cf8244..9531c6626 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -15,10 +15,11 @@ import logging from enum import Enum -from typing import Any, Dict, Optional, Tuple -from common.tools.context_queries.Device import get_device -from common.tools.context_queries.Link import get_link -from common.tools.context_queries.Topology import get_topology +from typing import Any, Dict, List, Optional, Tuple +from common.tools.context_queries.Device import get_device, get_devices +from common.tools.context_queries.Link import get_link, get_links +from common.tools.context_queries.Topology import get_topology, get_topologies +from common.tools.context_queries.Service import get_service_by_uuid, get_services from context.client.ContextClient import ContextClient @@ -26,17 +27,21 @@ LOGGER = logging.getLogger(__name__) class CachedEntities(Enum): - TOPOLOGY = 'topology' - DEVICE = 'device' - ENDPOINT = 'endpoint' - LINK = 'link' + TOPOLOGY = 'topology' + DEVICE = 'device' + ENDPOINT = 'endpoint' + LINK = 'link' + SERVICE = 'service' + 
CONNECTION = 'connection' KEY_LENGTHS = { - CachedEntities.TOPOLOGY : 1, - CachedEntities.DEVICE : 1, - CachedEntities.ENDPOINT : 2, - CachedEntities.LINK : 1, + CachedEntities.TOPOLOGY : 1, + CachedEntities.DEVICE : 1, + CachedEntities.ENDPOINT : 2, + CachedEntities.LINK : 1, + CachedEntities.SERVICE : 2, + CachedEntities.CONNECTION : 3, } @@ -69,6 +74,17 @@ class ObjectCache: if not auto_retrieve: return None return self._update(entity, *object_uuids) + def get_all( + self, entity : CachedEntities, fresh : bool = False + ) -> List[Any]: + if fresh: self._update_all(entity) + entity_name = str(entity.value) + return [ + obj + for obj_key, obj in self._object_cache.items() + if obj_key[0] == entity_name + ] + def set(self, entity : CachedEntities, object_inst : Any, *object_uuids : str) -> None: object_key = compose_object_key(entity, *object_uuids) self._object_cache[object_key] = object_inst @@ -90,6 +106,10 @@ class ObjectCache: object_inst = get_link( self._context_client, object_uuids[0], rw_copy=False ) + elif entity == CachedEntities.SERVICE: + object_inst = get_service_by_uuid( + self._context_client, object_uuids[0], rw_copy=False + ) else: MSG = 'Not Supported ({:s}, {:s})' LOGGER.warning(MSG.format(str(entity.value).title(), str(object_uuids))) @@ -122,6 +142,57 @@ class ObjectCache: return object_inst + def _update_all(self, entity : CachedEntities) -> None: + if entity == CachedEntities.TOPOLOGY: + objects = get_topologies(self._context_client) + objects = { + (t.topology_id.topology_uuid.uuid, t.name) : t + for t in objects + } + elif entity == CachedEntities.DEVICE: + objects = get_devices(self._context_client) + objects = { + (d.device_id.device_uuid.uuid, d.name) : d + for d in objects + } + elif entity == CachedEntities.ENDPOINT: + # Endpoints are only updated when updating a Device + return None + elif entity == CachedEntities.LINK: + objects = get_links(self._context_client) + objects = { + (l.link_id.link_uuid.uuid, l.name) : l + for l in objects + } + elif entity == CachedEntities.SERVICE: + objects = get_services(self._context_client) + objects = { + (s.service_id.service_uuid.uuid, s.name) : s + for s in objects + } + else: + MSG = 'Not Supported ({:s})' + LOGGER.warning(MSG.format(str(entity.value).title())) + return None + + for (object_uuid, object_name), object_inst in objects.items(): + self.set(entity, object_inst, object_uuid) + self.set(entity, object_inst, object_name) + + if entity == CachedEntities.DEVICE: + for endpoint in object_inst.device_endpoints: + endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid + if object_uuid != endpoint_device_uuid: + MSG = 'DeviceUUID({:s}) != Endpoint.DeviceUUID({:s})' + raise Exception(str(object_uuid), str(endpoint_device_uuid)) + + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_name = endpoint.name + self.set(CachedEntities.ENDPOINT, endpoint, object_uuid, endpoint_uuid) + self.set(CachedEntities.ENDPOINT, endpoint, object_uuid, endpoint_name) + self.set(CachedEntities.ENDPOINT, endpoint, object_name, endpoint_uuid) + self.set(CachedEntities.ENDPOINT, endpoint, object_name, endpoint_name) + def delete(self, entity : CachedEntities, *object_uuids : str) -> None: object_key = compose_object_key(entity, *object_uuids) self._object_cache.pop(object_key, None) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 70fa1a935..303363985 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py 
+++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -13,10 +13,11 @@ # limitations under the License. -import logging, queue, threading +import logging, queue, threading, uuid from typing import Any, Optional, Set +from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent, TopologyEvent +from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string @@ -28,7 +29,7 @@ from simap_connector.Config import ( from .simap_client.RestConfClient import RestConfClient from .simap_client.SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache -from .Tools import get_device_endpoint, get_link_endpoint +from .Tools import get_device_endpoint, get_link_endpoint, get_service_endpoint LOGGER = logging.getLogger(__name__) @@ -90,9 +91,14 @@ class EventDispatcher(BaseEventDispatcher): topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name + if topology_name != DEFAULT_TOPOLOGY_NAME: + supporting_network_ids = [DEFAULT_TOPOLOGY_NAME] + # Theoretically it should be create(), but given we have multiple clients # updating same SIMAP server, use update to skip tricks on get-check-create-or-update. - self._simap_client.network(topology_name).update() + self._simap_client.network(topology_name).update( + supporting_network_ids=supporting_network_ids + ) MSG = 'Topology Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -105,7 +111,15 @@ class EventDispatcher(BaseEventDispatcher): topology_uuid = topology_event.topology_id.topology_uuid.uuid topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name - self._simap_client.network(topology_name).update() + + if topology_name != DEFAULT_TOPOLOGY_NAME: + supporting_network_ids = [DEFAULT_TOPOLOGY_NAME] + + # Theoretically it should be create(), but given we have multiple clients + # updating same SIMAP server, use update to skip tricks on get-check-create-or-update. 
+ self._simap_client.network(topology_name).update( + supporting_network_ids=supporting_network_ids + ) MSG = 'Topology Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -501,6 +515,163 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Link Remove: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + def dispatch_service_create(self, service_event : ServiceEvent) -> None: + MSG = 'Processing Service Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) + + service_uuid = service_event.service_id.service_uuid.uuid + service = self._object_cache.get(CachedEntities.SERVICE, service_uuid) + service_name = service.name + + try: + uuid.UUID(hex=service_name) + # skip it if properly parsed, means it is a service with a UUID-based name, i.e., a sub-service + return + except: # pylint: disable=bare-except + pass + + topology_uuid, endpoint_uuids = get_service_endpoint(service) + if topology_uuid is None: + MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' + str_service_event = grpc_message_to_json_string(service_event) + str_service = grpc_message_to_json_string(service) + LOGGER.warning(MSG.format(str_service_event, str_service)) + return + + topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + topology_names = {t.name for t in topologies} + topology_names.discard(DEFAULT_TOPOLOGY_NAME) + if len(topology_names) != 1: + MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' + str_service_event = grpc_message_to_json_string(service_event) + LOGGER.warning(MSG.format(str_service_event)) + return + domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net + + domain_topo = self._simap_client.network(domain_name) + domain_topo.update() + + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + try: + if src_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0][0]))) + if src_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0]))) + if dst_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1][0]))) + if dst_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1]))) + except Exception as e: + MSG = '{:s} in Service({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e + + src_dev_name = src_device.name + src_ep_name = src_endpoint.name + dst_dev_name = dst_device.name + dst_ep_name = dst_endpoint.name + + parent_domain_name = DEFAULT_TOPOLOGY_NAME # TODO: compute from service settings + + site_1_name = 'site1' # TODO: compute from service settings + site_1 = domain_topo.node(site_1_name) + site_1.create(supporting_node_ids=[(parent_domain_name, src_dev_name)]) + site_1.termination_point(src_ep_name).create( + supporting_termination_point_ids=[(parent_domain_name, src_dev_name, src_ep_name)] + ) + + site_2_name = 'site2' # TODO: compute from service settings + site_2 = domain_topo.node(site_2_name) + 
site_2.create(supporting_node_ids=[(parent_domain_name, dst_dev_name)]) + site_2.termination_point(dst_ep_name).create( + supporting_termination_point_ids=[(parent_domain_name, dst_dev_name, dst_ep_name)] + ) + + link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( + service_name, src_dev_name, src_ep_name, dst_dev_name, dst_ep_name + ) + domain_topo.link(link_name).create(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) + + MSG = 'Logical Link Created for Service: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) + + def dispatch_service_update(self, service_event : ServiceEvent) -> None: + self.dispatch_service_create(service_event) + + def dispatch_service_remove(self, service_event : ServiceEvent) -> None: + MSG = 'Processing Service Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) + + service_uuid = service_event.service_id.service_uuid.uuid + service = self._object_cache.get(CachedEntities.SERVICE, service_uuid) + service_name = service.name + + topology_uuid, endpoint_uuids = get_service_endpoint(service) + if topology_uuid is None: + MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' + str_service_event = grpc_message_to_json_string(service_event) + str_service = grpc_message_to_json_string(service) + LOGGER.warning(MSG.format(str_service_event, str_service)) + return + + topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + topology_names = {t.name for t in topologies} + topology_names.discard(DEFAULT_TOPOLOGY_NAME) + if len(topology_names) != 1: + MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' + str_service_event = grpc_message_to_json_string(service_event) + LOGGER.warning(MSG.format(str_service_event)) + return + domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net + + domain_topo = self._simap_client.network(domain_name) + domain_topo.update() + + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + try: + if src_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0][0]))) + if src_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[0]))) + if dst_device is None: + MSG = 'Device({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1][0]))) + if dst_endpoint is None: + MSG = 'Endpoint({:s}) not found in cache' + raise Exception(MSG.format(str(endpoint_uuids[1]))) + except Exception as e: + MSG = '{:s} in Service({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e + + src_dev_name = src_device.name + src_ep_name = src_endpoint.name + dst_dev_name = dst_device.name + dst_ep_name = dst_endpoint.name + + link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( + service_name, src_dev_name, src_ep_name, dst_dev_name, dst_ep_name + ) + te_link = domain_topo.link(link_name) + te_link.delete() + + self._object_cache.delete(CachedEntities.SERVICE, service_uuid) + self._object_cache.delete(CachedEntities.SERVICE, service_name) + + MSG = 'Logical Link Removed for Service: {:s}' + 
LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) class SimapUpdater: def __init__(self, terminate : threading.Event) -> None: diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index db0430747..fa03050a3 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -17,7 +17,7 @@ import enum from typing import List, Optional, Set, Tuple, Union from common.proto.context_pb2 import ( EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, - DeviceEvent, Link, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent + DeviceEvent, Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent ) from common.tools.grpc.Tools import grpc_message_to_json_string @@ -122,3 +122,39 @@ def get_link_endpoint(link : Link) -> Tuple[Optional[str], List[Tuple[str, str]] raise Exception(MSG.format(str(e), grpc_message_to_json_string(link))) from e return topology_uuid, endpoint_uuids + + +def get_service_endpoint(service : Service) -> Tuple[Optional[str], List[Tuple[str, str]]]: + topology_uuids : Set[str] = set() + endpoint_uuids : List[Tuple[str, str]] = list() + + if len(service.service_endpoint_ids) == 0: + return None, endpoint_uuids + + for endpoint_id in service.service_endpoint_ids: + topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + topology_uuids.add(topology_uuid) + + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_uuids.append((device_uuid, endpoint_uuid)) + + try: + # Check topology UUIDs + if len(topology_uuids) != 1: + MSG = 'Unsupported: no/multiple Topologies({:s}) referenced' + raise Exception(MSG.format(str(topology_uuids))) + topology_uuid = list(topology_uuids)[0] + if len(topology_uuid) == 0: + MSG = 'Unsupported: empty TopologyUUID({:s}) referenced' + raise Exception(MSG.format(str(topology_uuid))) + + # Check Count Endpoints + if len(endpoint_uuids) != 2: + MSG = 'Unsupported: non-p2p service ServiceUUIDs({:s})' + raise Exception(MSG.format(str(endpoint_uuids))) + except Exception as e: + MSG = '{:s} in Service({:s})' + raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e + + return topology_uuid, endpoint_uuids -- GitLab From b6926560a158805dad6267c8dbdd1109641c773c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 16:49:45 +0000 Subject: [PATCH 163/367] ECOC F5GA Telemetry Demo: - Added fake topologies in descriptors to identify which controller is running each time --- .../ecoc25-f5ga-telemetry/data/topology/topology-agg.json | 3 ++- .../ecoc25-f5ga-telemetry/data/topology/topology-e2e.json | 3 ++- src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index 1d3efb630..2f07dc48b 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -3,7 +3,8 @@ {"context_id": {"context_uuid": {"uuid": "admin"}}} ], "topologies": [ - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": 
"agg-net"}}} ], "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json index a1dddff25..4cd5c8c68 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json @@ -3,7 +3,8 @@ {"context_id": {"context_uuid": {"uuid": "admin"}}} ], "topologies": [ - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "e2e-net"}}} ], "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json index 58913deca..cf91710a3 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json @@ -3,7 +3,8 @@ {"context_id": {"context_uuid": {"uuid": "admin"}}} ], "topologies": [ - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "trans-pkt"}}} ], "devices": [ { -- GitLab From 9fadc0473940f359ceef38665ecd95121f7abc91 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 17:24:52 +0000 Subject: [PATCH 164/367] SIMAP Connector: - Bug fix in SimapUpdater --- src/simap_connector/service/simap_updater/SimapUpdater.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 303363985..d21fe6cfc 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -91,8 +91,9 @@ class EventDispatcher(BaseEventDispatcher): topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name + supporting_network_ids = list() if topology_name != DEFAULT_TOPOLOGY_NAME: - supporting_network_ids = [DEFAULT_TOPOLOGY_NAME] + supporting_network_ids.append(DEFAULT_TOPOLOGY_NAME) # Theoretically it should be create(), but given we have multiple clients # updating same SIMAP server, use update to skip tricks on get-check-create-or-update. @@ -112,8 +113,9 @@ class EventDispatcher(BaseEventDispatcher): topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name + supporting_network_ids = list() if topology_name != DEFAULT_TOPOLOGY_NAME: - supporting_network_ids = [DEFAULT_TOPOLOGY_NAME] + supporting_network_ids.append(DEFAULT_TOPOLOGY_NAME) # Theoretically it should be create(), but given we have multiple clients # updating same SIMAP server, use update to skip tricks on get-check-create-or-update. 
-- GitLab From 8a6d8fe9f74f4295017e08a52a5f167d128ce849 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 5 Sep 2025 17:32:15 +0000 Subject: [PATCH 165/367] SIMAP Connector: - Bug fix in SimapUpdater --- src/simap_connector/service/simap_updater/ObjectCache.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index 9531c6626..7963999de 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -40,7 +40,7 @@ KEY_LENGTHS = { CachedEntities.DEVICE : 1, CachedEntities.ENDPOINT : 2, CachedEntities.LINK : 1, - CachedEntities.SERVICE : 2, + CachedEntities.SERVICE : 1, CachedEntities.CONNECTION : 3, } -- GitLab From 495f649b4469300dbe34eb8fd305358cca825900 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 09:37:25 +0000 Subject: [PATCH 166/367] SIMAP Connector: - Factorized create-update code into set - Fixed creation of simap virtual links, using now update instead - Fixed identification of service endpoints --- .../service/simap_updater/SimapUpdater.py | 204 +++++------------- .../service/simap_updater/Tools.py | 10 +- 2 files changed, 58 insertions(+), 156 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index d21fe6cfc..2322be3b6 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -83,14 +83,14 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.debug(MSG.format(grpc_message_to_json_string(context_event))) - def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: + def _dispatch_topology_set(self, topology_event : TopologyEvent) -> None: MSG = 'Processing Topology Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) topology_uuid = topology_event.topology_id.topology_uuid.uuid topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name - + supporting_network_ids = list() if topology_name != DEFAULT_TOPOLOGY_NAME: supporting_network_ids.append(DEFAULT_TOPOLOGY_NAME) @@ -101,27 +101,16 @@ class EventDispatcher(BaseEventDispatcher): supporting_network_ids=supporting_network_ids ) - MSG = 'Topology Created: {:s}' - LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) + def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: + self._dispatch_topology_set(topology_event) - def dispatch_topology_update(self, topology_event : TopologyEvent) -> None: - MSG = 'Processing Topology Event: {:s}' + MSG = 'Topology Create: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) - topology_uuid = topology_event.topology_id.topology_uuid.uuid - topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) - topology_name = topology.name - supporting_network_ids = list() - if topology_name != DEFAULT_TOPOLOGY_NAME: - supporting_network_ids.append(DEFAULT_TOPOLOGY_NAME) - - # Theoretically it should be create(), but given we have multiple clients - # updating same SIMAP server, use update to skip tricks on get-check-create-or-update. 
- self._simap_client.network(topology_name).update( - supporting_network_ids=supporting_network_ids - ) + def dispatch_topology_update(self, topology_event : TopologyEvent) -> None: + self._dispatch_topology_set(topology_event) MSG = 'Topology Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -143,7 +132,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) - def dispatch_device_create(self, device_event : DeviceEvent) -> None: + def _dispatch_device_set(self, device_event : DeviceEvent) -> None: MSG = 'Processing Device Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -198,64 +187,16 @@ class EventDispatcher(BaseEventDispatcher): #self._remove_skipped_device(device) - MSG = 'Device Created: {:s}' - LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + def dispatch_device_create(self, device_event : DeviceEvent) -> None: + self._dispatch_device_set(device_event) - def dispatch_device_update(self, device_event : DeviceEvent) -> None: - MSG = 'Processing Device Event: {:s}' + MSG = 'Device Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - device_uuid = device_event.device_id.device_uuid.uuid - device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) - device_type = device.device_type - if device_type in SKIPPED_DEVICE_TYPES: - self._add_skipped_device(device) - MSG = ( - 'DeviceEvent({:s}) skipped, is of a skipped device type. ' - 'SIMAP should be updated by him: {:s}' - ) - str_device_event = grpc_message_to_json_string(device_event) - str_device = grpc_message_to_json_string(device) - LOGGER.warning(MSG.format(str_device_event, str_device)) - return - - #device_controller_uuid = device.controller_id.device_uuid.uuid - #if len(device_controller_uuid) > 0: - # self._add_skipped_device(device) - # MSG = ( - # 'DeviceEvent({:s}) skipped, is a remotely-managed device. 
' - # 'SIMAP should be updated by remote controller: {:s}' - # ) - # str_device_event = grpc_message_to_json_string(device_event) - # str_device = grpc_message_to_json_string(device) - # LOGGER.warning(MSG.format(str_device_event, str_device)) - # return - - topology_uuid, endpoint_names = get_device_endpoint(device) - if topology_uuid is None: - #self._add_skipped_device(device) - MSG = 'DeviceEvent({:s}) skipped, no endpoints to identify topology: {:s}' - str_device_event = grpc_message_to_json_string(device_event) - str_device = grpc_message_to_json_string(device) - LOGGER.warning(MSG.format(str_device_event, str_device)) - return - - topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) - topology_name = topology.name - - te_topo = self._simap_client.network(topology_name) - te_topo.update() - - device_name = device.name - te_device = te_topo.node(device_name) - te_device.update() - - for endpoint_name in endpoint_names: - te_device.termination_point(endpoint_name).update() - - #self._remove_skipped_device(device) + def dispatch_device_update(self, device_event : DeviceEvent) -> None: + self._dispatch_device_set(device_event) MSG = 'Device Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -323,7 +264,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - def dispatch_link_create(self, link_event : LinkEvent) -> None: + def _dispatch_link_set(self, link_event : LinkEvent) -> None: MSG = 'Processing Link Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -389,83 +330,24 @@ class EventDispatcher(BaseEventDispatcher): MSG = '{:s} in Link({:s})' raise Exception(MSG.format(str(e), grpc_message_to_json_string(link))) from e - te_topo.link(link_name).create(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) - - MSG = 'Link Created: {:s}' - LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) - - def dispatch_link_update(self, link_event : LinkEvent) -> None: - MSG = 'Processing Link Event: {:s}' - LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) - - link_uuid = link_event.link_id.link_uuid.uuid - link = self._object_cache.get(CachedEntities.LINK, link_uuid) - link_name = link.name - - topology_uuid, endpoint_uuids = get_link_endpoint(link) - if topology_uuid is None: - MSG = 'LinkEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' - str_link_event = grpc_message_to_json_string(link_event) - str_link = grpc_message_to_json_string(link) - LOGGER.warning(MSG.format(str_link_event, str_link)) - return - - topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) - topology_name = topology.name - - te_topo = self._simap_client.network(topology_name) - te_topo.update() + te_link = te_topo.link(link_name) + te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) - src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) - src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) - dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) - dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) - # Skip links that connect two management endpoints - if src_endpoint is not None and dst_endpoint is not None: - if str(src_endpoint.name).lower() == 'mgmt' and 
str(dst_endpoint.name).lower() == 'mgmt': - MSG = 'LinkEvent({:s}) skipped, connects two management endpoints: {:s}' - str_link_event = grpc_message_to_json_string(link_event) - str_link = grpc_message_to_json_string(link) - LOGGER.warning(MSG.format(str_link_event, str_link)) - return + def dispatch_link_create(self, link_event : LinkEvent) -> None: + self._dispatch_link_set(link_event) - # Skip links that connect to devices previously marked as skipped - src_uuid = src_device.device_id.device_uuid.uuid - dst_uuid = dst_device.device_id.device_uuid.uuid - src_name = src_device.name - dst_name = dst_device.name - if (src_uuid in self._skipped_devices or src_name in self._skipped_devices - or dst_uuid in self._skipped_devices or dst_name in self._skipped_devices): - MSG = 'LinkEvent({:s}) skipped, connects to skipped device(s): {:s}' - str_link_event = grpc_message_to_json_string(link_event) - str_link = grpc_message_to_json_string(link) - LOGGER.warning(MSG.format(str_link_event, str_link)) - return + MSG = 'Link Created: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) - try: - if src_device is None: - MSG = 'Device({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[0][0]))) - if src_endpoint is None: - MSG = 'Endpoint({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[0]))) - if dst_device is None: - MSG = 'Device({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[1][0]))) - if dst_endpoint is None: - MSG = 'Endpoint({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[1]))) - except Exception as e: - MSG = '{:s} in Link({:s})' - raise Exception(MSG.format(str(e), grpc_message_to_json_string(link))) from e - te_link = te_topo.link(link_name) - te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) + def dispatch_link_update(self, link_event : LinkEvent) -> None: + self._dispatch_link_set(link_event) MSG = 'Link Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) + def dispatch_link_remove(self, link_event : LinkEvent) -> None: MSG = 'Processing Link Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -517,7 +399,8 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Link Remove: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) - def dispatch_service_create(self, service_event : ServiceEvent) -> None: + + def _dispatch_service_set(self, service_event : ServiceEvent) -> None: MSG = 'Processing Service Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) @@ -533,6 +416,7 @@ class EventDispatcher(BaseEventDispatcher): pass topology_uuid, endpoint_uuids = get_service_endpoint(service) + if topology_uuid is None: MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' str_service_event = grpc_message_to_json_string(service_event) @@ -540,6 +424,13 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning(MSG.format(str_service_event, str_service)) return + if len(endpoint_uuids) < 2: + MSG = 'ServiceEvent({:s}) skipped, not enough endpoint_ids to compose link: {:s}' + str_service_event = grpc_message_to_json_string(service_event) + str_service = grpc_message_to_json_string(service) + LOGGER.warning(MSG.format(str_service_event, str_service)) + return + topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) topology_names = {t.name for t in topologies} 
topology_names.discard(DEFAULT_TOPOLOGY_NAME) @@ -584,28 +475,36 @@ class EventDispatcher(BaseEventDispatcher): site_1_name = 'site1' # TODO: compute from service settings site_1 = domain_topo.node(site_1_name) - site_1.create(supporting_node_ids=[(parent_domain_name, src_dev_name)]) - site_1.termination_point(src_ep_name).create( - supporting_termination_point_ids=[(parent_domain_name, src_dev_name, src_ep_name)] - ) + site_1.update(supporting_node_ids=[(parent_domain_name, src_dev_name)]) + site_1_tp = site_1.termination_point(src_ep_name) + site_1_tp.update(supporting_termination_point_ids=[(parent_domain_name, src_dev_name, src_ep_name)]) site_2_name = 'site2' # TODO: compute from service settings site_2 = domain_topo.node(site_2_name) - site_2.create(supporting_node_ids=[(parent_domain_name, dst_dev_name)]) - site_2.termination_point(dst_ep_name).create( - supporting_termination_point_ids=[(parent_domain_name, dst_dev_name, dst_ep_name)] - ) + site_2.update(supporting_node_ids=[(parent_domain_name, dst_dev_name)]) + site_2_tp = site_2.termination_point(dst_ep_name) + site_2_tp.update(supporting_termination_point_ids=[(parent_domain_name, dst_dev_name, dst_ep_name)]) link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( service_name, src_dev_name, src_ep_name, dst_dev_name, dst_ep_name ) - domain_topo.link(link_name).create(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) + dom_link = domain_topo.link(link_name) + dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) + + + def dispatch_service_created(self, service_event : ServiceEvent) -> None: + self._dispatch_service_set(service_event) MSG = 'Logical Link Created for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) + def dispatch_service_update(self, service_event : ServiceEvent) -> None: - self.dispatch_service_create(service_event) + self._dispatch_service_set(service_event) + + MSG = 'Logical Link Updated for Service: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) + def dispatch_service_remove(self, service_event : ServiceEvent) -> None: MSG = 'Processing Service Event: {:s}' @@ -675,6 +574,7 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Logical Link Removed for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) + class SimapUpdater: def __init__(self, terminate : threading.Event) -> None: self._context_client = ContextClient() diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index fa03050a3..d420f24e9 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -125,12 +125,12 @@ def get_link_endpoint(link : Link) -> Tuple[Optional[str], List[Tuple[str, str]] def get_service_endpoint(service : Service) -> Tuple[Optional[str], List[Tuple[str, str]]]: + if len(service.service_endpoint_ids) == 0: + return None, list() + topology_uuids : Set[str] = set() endpoint_uuids : List[Tuple[str, str]] = list() - if len(service.service_endpoint_ids) == 0: - return None, endpoint_uuids - for endpoint_id in service.service_endpoint_ids: topology_uuid = endpoint_id.topology_id.topology_uuid.uuid topology_uuids.add(topology_uuid) @@ -144,15 +144,17 @@ def get_service_endpoint(service : Service) -> Tuple[Optional[str], List[Tuple[s if len(topology_uuids) != 1: MSG = 'Unsupported: no/multiple Topologies({:s}) referenced' raise Exception(MSG.format(str(topology_uuids))) + topology_uuid = 
list(topology_uuids)[0] if len(topology_uuid) == 0: MSG = 'Unsupported: empty TopologyUUID({:s}) referenced' raise Exception(MSG.format(str(topology_uuid))) # Check Count Endpoints - if len(endpoint_uuids) != 2: + if len(endpoint_uuids) > 2: MSG = 'Unsupported: non-p2p service ServiceUUIDs({:s})' raise Exception(MSG.format(str(endpoint_uuids))) + except Exception as e: MSG = '{:s} in Service({:s})' raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e -- GitLab From 4d2e05f8a1ea7249c4d472a9197d25b844d0bc80 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 09:41:57 +0000 Subject: [PATCH 167/367] ECOC F5GA Telemetry Demo: - Fixed E2E subscription payloads --- .../data/telemetry/subscription-slice1.json | 2 +- .../data/telemetry/subscription-slice2.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json index 0a73f0b67..40d167c47 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json @@ -1,7 +1,7 @@ { "ietf-subscribed-notifications:input": { "datastore": "operational", - "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=${simapName}/ietf-network-topology:link=link-1/simap-telemetry", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e-net/ietf-network-topology:link=slice1:ONT1-200==POP2-200/simap-telemetry", "ietf-yang-push:periodic": { "ietf-yang-push:period": 10 } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json index 0a73f0b67..2c4478ea7 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json @@ -1,7 +1,7 @@ { "ietf-subscribed-notifications:input": { "datastore": "operational", - "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=${simapName}/ietf-network-topology:link=link-1/simap-telemetry", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e-net/ietf-network-topology:link=slice1:ONT1-200==POP1-200/simap-telemetry", "ietf-yang-push:periodic": { "ietf-yang-push:period": 10 } -- GitLab From f4c93c1fa06fc9c1370e2b6c9260dff76c7fb763 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 11:31:38 +0000 Subject: [PATCH 168/367] Common - Tools - Rest API, and propagation of change along code: - Created REST API Tools module - Moved here a generic client and server --- src/common/tools/rest_api/__init__.py | 14 ++++++++++++++ .../tools/{ => rest_api}/client/RestApiClient.py | 0 src/common/tools/rest_api/client/__init__.py | 14 ++++++++++++++ .../server}/GenericRestServer.py | 0 src/common/tools/rest_api/server/__init__.py | 14 ++++++++++++++ .../service/drivers/ietf_l2vpn/TfsApiClient.py | 2 +- .../service/drivers/ietf_l3vpn/TfsApiClient.py | 2 +- .../service/drivers/ietf_slice/TfsApiClient.py | 2 +- .../service/drivers/optical_tfs/TfsApiClient.py | 2 +- .../drivers/optical_tfs/TfsOpticalClient.py | 2 +- src/device/tests/test_unitary_ietf_actn.py | 2 +- src/ztp_server/service/rest_server/RestServer.py | 2 +- src/ztp_server/tests/PrepareTestScenario.py | 2 +- 13 files changed, 50 insertions(+), 8 deletions(-) create mode 100644 src/common/tools/rest_api/__init__.py 
rename src/common/tools/{ => rest_api}/client/RestApiClient.py (100%) create mode 100644 src/common/tools/rest_api/client/__init__.py rename src/common/tools/{service => rest_api/server}/GenericRestServer.py (100%) create mode 100644 src/common/tools/rest_api/server/__init__.py diff --git a/src/common/tools/rest_api/__init__.py b/src/common/tools/rest_api/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/rest_api/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/common/tools/client/RestApiClient.py b/src/common/tools/rest_api/client/RestApiClient.py similarity index 100% rename from src/common/tools/client/RestApiClient.py rename to src/common/tools/rest_api/client/RestApiClient.py diff --git a/src/common/tools/rest_api/client/__init__.py b/src/common/tools/rest_api/client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/rest_api/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/common/tools/service/GenericRestServer.py b/src/common/tools/rest_api/server/GenericRestServer.py similarity index 100% rename from src/common/tools/service/GenericRestServer.py rename to src/common/tools/rest_api/server/GenericRestServer.py diff --git a/src/common/tools/rest_api/server/__init__.py b/src/common/tools/rest_api/server/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/rest_api/server/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py index ed8367e60..ac2106716 100644 --- a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py @@ -14,7 +14,7 @@ import logging, requests from typing import Dict, List, Optional -from common.tools.client.RestApiClient import RestApiClient +from common.tools.rest_api.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index c92056285..de695685c 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -14,7 +14,7 @@ import json, logging, requests from typing import Dict, List, Optional -from common.tools.client.RestApiClient import RestApiClient +from common.tools.rest_api.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum diff --git a/src/device/service/drivers/ietf_slice/TfsApiClient.py b/src/device/service/drivers/ietf_slice/TfsApiClient.py index 01ea1a666..08c9b78e0 100644 --- a/src/device/service/drivers/ietf_slice/TfsApiClient.py +++ b/src/device/service/drivers/ietf_slice/TfsApiClient.py @@ -14,7 +14,7 @@ import json, logging, requests from typing import Dict, List, Optional -from common.tools.client.RestApiClient import RestApiClient +from common.tools.rest_api.client.RestApiClient import RestApiClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum diff --git a/src/device/service/drivers/optical_tfs/TfsApiClient.py b/src/device/service/drivers/optical_tfs/TfsApiClient.py index f60edd4fc..79802fcf6 100644 --- a/src/device/service/drivers/optical_tfs/TfsApiClient.py +++ b/src/device/service/drivers/optical_tfs/TfsApiClient.py @@ -16,7 +16,7 @@ import logging from typing import Dict, List, Optional, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum -from common.tools.client.RestApiClient import RestApiClient +from common.tools.rest_api.client.RestApiClient import RestApiClient from common.tools.object_factory.Constraint import json_constraint_custom from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Device import json_device_id diff --git a/src/device/service/drivers/optical_tfs/TfsOpticalClient.py b/src/device/service/drivers/optical_tfs/TfsOpticalClient.py index 648e7e596..4a4f26343 100644 --- a/src/device/service/drivers/optical_tfs/TfsOpticalClient.py +++ b/src/device/service/drivers/optical_tfs/TfsOpticalClient.py @@ -15,7 +15,7 @@ import logging, requests from typing import Dict, List, Optional, Union -from common.tools.client.RestApiClient import RestApiClient +from common.tools.rest_api.client.RestApiClient import RestApiClient LOGGER = logging.getLogger(__name__) diff --git a/src/device/tests/test_unitary_ietf_actn.py b/src/device/tests/test_unitary_ietf_actn.py index b5c4a5966..f9e6748fe 100644 --- a/src/device/tests/test_unitary_ietf_actn.py +++ b/src/device/tests/test_unitary_ietf_actn.py @@ -21,7 +21,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Device import ( json_device_connect_rules, 
json_device_id, json_device_ietf_actn_disabled ) -from common.tools.service.GenericRestServer import GenericRestServer +from common.tools.rest_api.server.GenericRestServer import GenericRestServer from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from device.service.DeviceService import DeviceService diff --git a/src/ztp_server/service/rest_server/RestServer.py b/src/ztp_server/service/rest_server/RestServer.py index e1d50c9eb..7b028e39d 100755 --- a/src/ztp_server/service/rest_server/RestServer.py +++ b/src/ztp_server/service/rest_server/RestServer.py @@ -14,7 +14,7 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_baseurl_http, get_service_port_http -from common.tools.service.GenericRestServer import GenericRestServer +from common.tools.rest_api.server.GenericRestServer import GenericRestServer class RestServer(GenericRestServer): def __init__(self, cls_name: str = __name__) -> None: diff --git a/src/ztp_server/tests/PrepareTestScenario.py b/src/ztp_server/tests/PrepareTestScenario.py index cda9702f7..a722d743a 100644 --- a/src/ztp_server/tests/PrepareTestScenario.py +++ b/src/ztp_server/tests/PrepareTestScenario.py @@ -21,7 +21,7 @@ from common.Settings import ( ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name ) -from common.tools.service.GenericRestServer import GenericRestServer +from common.tools.rest_api.server.GenericRestServer import GenericRestServer from ztp_server.service.rest_server.ztpServer_plugins.ztp_provisioning_api import register_ztp_provisioning from ztp_server.client.ZtpClient import ZtpClient -- GitLab From 179b2b4b75b989ad18fecb10001931e97ad1e529 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 11:33:11 +0000 Subject: [PATCH 169/367] Common - Tools - Rest Conf, and propagation of change along code: - Created REST Conf Tools module - Moved here a generic client and server - Propagated changes along code --- .../tools/rest_conf}/__init__.py | 0 src/common/tools/rest_conf/client/README.md | 5 + .../{ => rest_conf}/client/RestConfClient.py | 9 +- src/common/tools/rest_conf/client/__init__.py | 14 ++ src/common/tools/rest_conf/server/Dockerfile | 66 +++++ src/common/tools/rest_conf/server/README.md | 31 +++ src/common/tools/rest_conf/server/__init__.py | 14 ++ src/common/tools/rest_conf/server/build.sh | 23 ++ src/common/tools/rest_conf/server/deploy.sh | 32 +++ src/common/tools/rest_conf/server/destroy.sh | 25 ++ .../tools/rest_conf/server/requirements.in | 25 ++ .../server/restconf_server}/Dispatch.py | 0 .../server/restconf_server}/HostMeta.py | 0 .../restconf_server}/HttpStatusCodesEnum.py | 0 .../server/restconf_server/YangHandler.py | 226 ++++++++++++++++++ .../restconf_server/YangModelDiscoverer.py | 195 +++++++++++++++ .../server/restconf_server/__init__.py | 14 ++ .../server/restconf_server}/__main__.py | 0 .../rest_conf/server/restconf_server}/app.py | 47 ++-- .../rest_conf/server/run_server_gunicorn.sh | 20 ++ .../rest_conf/server/run_server_standalone.sh | 19 ++ .../tools/rest_conf/server/startup.json | 1 + .../drivers/ietf_actn/IetfActnDriver.py | 2 +- .../ietf_actn/handlers/EthtServiceHandler.py | 2 +- .../handlers/NetworkTopologyHandler.py | 2 +- .../ietf_actn/handlers/OsuTunnelHandler.py | 2 +- src/device/service/drivers/nce/NCEDriver.py | 2 +- .../drivers/nce/handlers/AppFlowHandler.py | 2 +- .../nce/handlers/NetworkTopologyHandler.py | 2 +- .../nce/handlers/SubscriptionHandler.py | 2 +- .../simap_client/RestConfClient.py | 191 --------------- 
.../simap_server/simap_server/YangHandler.py | 142 ----------- 32 files changed, 758 insertions(+), 357 deletions(-) rename src/{tests/tools/simap_server/simap_server => common/tools/rest_conf}/__init__.py (100%) create mode 100644 src/common/tools/rest_conf/client/README.md rename src/common/tools/{ => rest_conf}/client/RestConfClient.py (98%) create mode 100644 src/common/tools/rest_conf/client/__init__.py create mode 100644 src/common/tools/rest_conf/server/Dockerfile create mode 100644 src/common/tools/rest_conf/server/README.md create mode 100644 src/common/tools/rest_conf/server/__init__.py create mode 100755 src/common/tools/rest_conf/server/build.sh create mode 100755 src/common/tools/rest_conf/server/deploy.sh create mode 100755 src/common/tools/rest_conf/server/destroy.sh create mode 100644 src/common/tools/rest_conf/server/requirements.in rename src/{tests/tools/simap_server/simap_server => common/tools/rest_conf/server/restconf_server}/Dispatch.py (100%) rename src/{tests/tools/simap_server/simap_server => common/tools/rest_conf/server/restconf_server}/HostMeta.py (100%) rename src/{tests/tools/simap_server/simap_server => common/tools/rest_conf/server/restconf_server}/HttpStatusCodesEnum.py (100%) create mode 100644 src/common/tools/rest_conf/server/restconf_server/YangHandler.py create mode 100644 src/common/tools/rest_conf/server/restconf_server/YangModelDiscoverer.py create mode 100644 src/common/tools/rest_conf/server/restconf_server/__init__.py rename src/{tests/tools/simap_server/simap_server => common/tools/rest_conf/server/restconf_server}/__main__.py (100%) rename src/{tests/tools/simap_server/simap_server => common/tools/rest_conf/server/restconf_server}/app.py (55%) create mode 100755 src/common/tools/rest_conf/server/run_server_gunicorn.sh create mode 100755 src/common/tools/rest_conf/server/run_server_standalone.sh create mode 100644 src/common/tools/rest_conf/server/startup.json delete mode 100644 src/tests/tools/simap_server/simap_client/RestConfClient.py delete mode 100644 src/tests/tools/simap_server/simap_server/YangHandler.py diff --git a/src/tests/tools/simap_server/simap_server/__init__.py b/src/common/tools/rest_conf/__init__.py similarity index 100% rename from src/tests/tools/simap_server/simap_server/__init__.py rename to src/common/tools/rest_conf/__init__.py diff --git a/src/common/tools/rest_conf/client/README.md b/src/common/tools/rest_conf/client/README.md new file mode 100644 index 000000000..9605fc751 --- /dev/null +++ b/src/common/tools/rest_conf/client/README.md @@ -0,0 +1,5 @@ +# Generic RESTCONF Client + +This server implements a basic RESTCONF Client that can be potentially used for any case. 
+ +See a simple working example in folder `src/tests/tools/simap_server` diff --git a/src/common/tools/client/RestConfClient.py b/src/common/tools/rest_conf/client/RestConfClient.py similarity index 98% rename from src/common/tools/client/RestConfClient.py rename to src/common/tools/rest_conf/client/RestConfClient.py index 35442bdcf..c2ce856f6 100644 --- a/src/common/tools/client/RestConfClient.py +++ b/src/common/tools/rest_conf/client/RestConfClient.py @@ -14,7 +14,7 @@ import logging, requests from typing import Any, Dict, Optional, Set -from .RestApiClient import RestApiClient +from common.tools.rest_api.client.RestApiClient import RestApiClient HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' @@ -35,6 +35,7 @@ class RestConfClient(RestApiClient): self._discover_base_url() + def _discover_base_url(self) -> None: host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) host_meta : Dict = super().get(host_meta_url, expected_status_codes={requests.codes['OK']}) @@ -58,6 +59,7 @@ class RestConfClient(RestApiClient): self._base_url = str(href).replace('//', '/') + def get( self, endpoint : str, expected_status_codes : Set[int] = {requests.codes['OK']} @@ -67,6 +69,7 @@ class RestConfClient(RestApiClient): expected_status_codes=expected_status_codes ) + def post( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = {requests.codes['CREATED']} @@ -76,6 +79,7 @@ class RestConfClient(RestApiClient): expected_status_codes=expected_status_codes ) + def put( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} @@ -85,6 +89,7 @@ class RestConfClient(RestApiClient): expected_status_codes=expected_status_codes ) + def patch( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} @@ -94,6 +99,7 @@ class RestConfClient(RestApiClient): expected_status_codes=expected_status_codes ) + def delete( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} @@ -103,6 +109,7 @@ class RestConfClient(RestApiClient): expected_status_codes=expected_status_codes ) + def rpc( self, endpoint : str, body : Optional[Any] = None, expected_status_codes : Set[int] = {requests.codes['CREATED']} diff --git a/src/common/tools/rest_conf/client/__init__.py b/src/common/tools/rest_conf/client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/rest_conf/client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/common/tools/rest_conf/server/Dockerfile b/src/common/tools/rest_conf/server/Dockerfile new file mode 100644 index 000000000..1dbe96c0a --- /dev/null +++ b/src/common/tools/rest_conf/server/Dockerfile @@ -0,0 +1,66 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install git build-essential cmake libpcre2-dev python3-dev python3-cffi && \ + rm -rf /var/lib/apt/lists/* + +# Download, build and install libyang. Note that APT package is outdated +# - Ref: https://github.com/CESNET/libyang +# - Ref: https://github.com/CESNET/libyang-python/ +RUN mkdir -p /var/libyang +RUN git clone https://github.com/CESNET/libyang.git /var/libyang +WORKDIR /var/libyang +RUN git fetch +RUN git checkout v2.1.148 +RUN mkdir -p /var/libyang/build +WORKDIR /var/libyang/build +RUN cmake -D CMAKE_BUILD_TYPE:String="Release" .. +RUN make +RUN make install +RUN ldconfig + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/restconf_server/ +WORKDIR /var/teraflow/restconf_server/ +COPY ./requirements.in ./requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +COPY ./yang/*.yang ./yang/ +COPY ./restconf_server/*.py ./restconf_server/ +COPY ./startup.json ./startup.json + +# Configure RESTCONF Server +ENV RESTCONF_PREFIX="/restconf" +ENV YANG_SEARCH_PATH="./yang" +ENV STARTUP_FILE="./startup.json" + +# Configure Flask for production +ENV FLASK_ENV="production" + +# Start the service +ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "restconf_server.app:app"] diff --git a/src/common/tools/rest_conf/server/README.md b/src/common/tools/rest_conf/server/README.md new file mode 100644 index 000000000..542d83617 --- /dev/null +++ b/src/common/tools/rest_conf/server/README.md @@ -0,0 +1,31 @@ +# Generic Mock RESTCONF Server + +This server implements a basic RESTCONF Server that can load, potentially, any YANG data model. +Just copy this file structure, drop in fodler `./yang` your YANG data models. +Hierarchical folder structures are also supported. +YangModelDiscoverer will parse the models, identify imports and dependencies, and sort the models before loading. 
+ +The server can be configured using the following environment variables: +- `RESTCONF_PREFIX`, defaults to `"/restconf"` +- `YANG_SEARCH_PATH`, defaults to `"./yang"` +- `STARTUP_FILE`, defaults to `"./startup.json"` +- `SECRET_KEY`, defaults to `secrets.token_hex(64)` + + +See a simple working example in folder `src/tests/tools/simap_server` + + +## Build the RESTCONF Server Docker image +```bash +./build.sh +``` + +## Deploy the RESTCONF Server +```bash +./deploy.sh +``` + +## Destroy the RESTCONF Server +```bash +./destroy.sh +``` diff --git a/src/common/tools/rest_conf/server/__init__.py b/src/common/tools/rest_conf/server/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/rest_conf/server/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/common/tools/rest_conf/server/build.sh b/src/common/tools/rest_conf/server/build.sh new file mode 100755 index 000000000..e0645d65d --- /dev/null +++ b/src/common/tools/rest_conf/server/build.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +docker buildx build -t mock-restconf-server:test -f Dockerfile . +#docker tag mock-restconf-server:test localhost:32000/tfs/mock-restconf-server:test +#docker push localhost:32000/tfs/mock-restconf-server:test diff --git a/src/common/tools/rest_conf/server/deploy.sh b/src/common/tools/rest_conf/server/deploy.sh new file mode 100755 index 000000000..03a33895d --- /dev/null +++ b/src/common/tools/rest_conf/server/deploy.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# Cleanup +docker rm --force mock-restconf-server + + +# Create Mock RESTCONF Server +docker run --detach --name mock-restconf-server --publish 8080:8080 mock-restconf-server:test + + +sleep 2 + + +# Dump Mock RESTCONF Server Docker container +docker ps -a + + +echo "Bye!" diff --git a/src/common/tools/rest_conf/server/destroy.sh b/src/common/tools/rest_conf/server/destroy.sh new file mode 100755 index 000000000..ecc2af686 --- /dev/null +++ b/src/common/tools/rest_conf/server/destroy.sh @@ -0,0 +1,25 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Cleanup +docker rm --force mock-restconf-server + + +# Dump Docker containers +docker ps -a + + +echo "Bye!" diff --git a/src/common/tools/rest_conf/server/requirements.in b/src/common/tools/rest_conf/server/requirements.in new file mode 100644 index 000000000..17155ed58 --- /dev/null +++ b/src/common/tools/rest_conf/server/requirements.in @@ -0,0 +1,25 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +cryptography==39.0.1 +eventlet==0.39.0 +Flask-HTTPAuth==4.5.0 +Flask-RESTful==0.3.9 +Flask==2.1.3 +gunicorn==23.0.0 +jsonschema==4.4.0 +libyang==2.8.4 +pyopenssl==23.0.0 +requests==2.27.1 +werkzeug==2.3.7 diff --git a/src/tests/tools/simap_server/simap_server/Dispatch.py b/src/common/tools/rest_conf/server/restconf_server/Dispatch.py similarity index 100% rename from src/tests/tools/simap_server/simap_server/Dispatch.py rename to src/common/tools/rest_conf/server/restconf_server/Dispatch.py diff --git a/src/tests/tools/simap_server/simap_server/HostMeta.py b/src/common/tools/rest_conf/server/restconf_server/HostMeta.py similarity index 100% rename from src/tests/tools/simap_server/simap_server/HostMeta.py rename to src/common/tools/rest_conf/server/restconf_server/HostMeta.py diff --git a/src/tests/tools/simap_server/simap_server/HttpStatusCodesEnum.py b/src/common/tools/rest_conf/server/restconf_server/HttpStatusCodesEnum.py similarity index 100% rename from src/tests/tools/simap_server/simap_server/HttpStatusCodesEnum.py rename to src/common/tools/rest_conf/server/restconf_server/HttpStatusCodesEnum.py diff --git a/src/common/tools/rest_conf/server/restconf_server/YangHandler.py b/src/common/tools/rest_conf/server/restconf_server/YangHandler.py new file mode 100644 index 000000000..9df57528f --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/YangHandler.py @@ -0,0 +1,226 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import json, libyang, logging +import urllib.parse +from typing import Dict, List, Optional, Set + + +LOGGER = logging.getLogger(__name__) + + +def walk_schema(node : libyang.SNode, path : str = '') -> Set[str]: + current_path = f'{path}/{node.name()}' + schema_paths : Set[str] = {current_path} + for child in node.children(): + if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue + schema_paths.update(walk_schema(child, current_path)) + return schema_paths + +def extract_schema_paths(yang_module : libyang.Module) -> Set[str]: + schema_paths : Set[str] = set() + for node in yang_module.children(): + schema_paths.update(walk_schema(node)) + return schema_paths + +class YangHandler: + def __init__( + self, yang_search_path : str, yang_module_names : List[str], + yang_startup_data : Dict + ) -> None: + self._yang_context = libyang.Context(yang_search_path) + self._loaded_modules : Set[str] = set() + self._schema_paths : Set[str] = set() + for yang_module_name in yang_module_names: + LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) + yang_module = self._yang_context.load_module(yang_module_name) + yang_module.feature_enable_all() + self._loaded_modules.add(yang_module_name) + self._schema_paths.update(extract_schema_paths(yang_module)) + + self._datastore = self._yang_context.parse_data_mem( + json.dumps(yang_startup_data), fmt='json' + ) + + def destroy(self) -> None: + self._yang_context.destroy() + + def get_schema_paths(self) -> Set[str]: + return self._schema_paths + + def get(self, path : str) -> Optional[str]: + path = self._normalize_path(path) + data = self._datastore.find_path(path) + if data is None: return None + json_data = data.print_mem( + fmt='json', with_siblings=False, pretty=True, + keep_empty_containers=False, include_implicit_defaults=True + ) + return json_data + + def get_xpath(self, xpath : str) -> List[str]: + if not xpath.startswith('/'): xpath = '/' + xpath + items = self._datastore.find_all(xpath) + result = list() + for item in items: + result.append(item.print_mem( + fmt='json', with_siblings=False, pretty=True, + keep_empty_containers=False, include_implicit_defaults=True + )) + return result + + def create(self, path : str, payload : Dict) -> str: + path = self._normalize_path(path) + # TODO: client should not provide identifier of element to be created, add it to subpath + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + + dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( + path, parent=self._datastore, value=dnode_parsed, update=False + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def update(self, path : str, payload : Dict) -> str: + path = self._normalize_path(path) + # NOTE: client should provide identifier of element to be updated + dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( + json.dumps(payload), 'json', strict=True, parse_only=False, + validate_present=True, validate_multi_error=True + ) + if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) + + dnode = self._yang_context.create_data_path( + path, 
parent=self._datastore, value=dnode_parsed, update=True + ) + self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) + + json_data = dnode.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + ) + return json_data + + def delete(self, path : str) -> Optional[str]: + path = self._normalize_path(path) + + # NOTE: client should provide identifier of element to be deleted + + node : libyang.DNode = self._datastore.find_path(path) + if node is None: return None + + LOGGER.info('node = {:s}'.format(str(node))) + json_data = str(node.print_mem( + fmt='json', with_siblings=True, pretty=True, + keep_empty_containers=True, include_implicit_defaults=True + )) + LOGGER.info('json_data = {:s}'.format(json_data)) + + node.unlink() + node.free() + + return json_data + + def _normalize_path(self, path : str) -> str: + """ + Normalize RESTCONF path segments using the standard `list=` + syntax into the libyang bracketed predicate form expected by + the datastore (e.g. `network="admin"` -> `network[network-id="admin"]`). + + This implementation looks up the schema node for the list and + uses its key leaf names to build the proper predicates. If the + schema information is unavailable, it falls back to using the + list name as the key name. + """ + + # URL-decode each path segment so escaped characters like `%22` + # (double quotes) are properly handled when parsing list keys. + parts = [urllib.parse.unquote(p) for p in path.strip('/').split('/') if p != ''] + schema_path = '' + out_parts: List[str] = [] + + for part in parts: + if '=' in part: + # split into name and value (value may contain commas/quotes) + name, val = part.split('=', 1) + # keep original name (may include prefix) for output, but + # use local name (without module prefix) to lookup schema + local_name = name.split(':', 1)[1] if ':' in name else name + schema_path = schema_path + '/' + local_name if schema_path else '/' + local_name + schema_nodes = list(self._yang_context.find_path(schema_path)) + if len(schema_nodes) != 1: + MSG = 'No/Multiple SchemaNodes({:s}) for SchemaPath({:s})' + raise Exception(MSG.format( + str([repr(sn) for sn in schema_nodes]), schema_path + )) + schema_node = schema_nodes[0] + + # parse values splitting on commas outside quotes + values = [] + cur = '' + in_quotes = False + for ch in val: + if ch == '"': + in_quotes = not in_quotes + cur += ch + elif ch == ',' and not in_quotes: + values.append(cur) + cur = '' + else: + cur += ch + if cur != '': + values.append(cur) + + # determine key names from schema_node if possible + key_names = None + if isinstance(schema_node, libyang.SList): + key_names = [k.name() for k in schema_node.keys()] + #if isinstance(keys, (list, tuple)): + # key_names = keys + #elif isinstance(keys, str): + # key_names = [kn for kn in k.split() if kn] + #else: + # MSG = 'Unsupported keys format: {:s} / {:s}' + # raise Exception(MSG.format(str(type(keys)), str(keys))) + #elif hasattr(schema_node, 'key'): + # k = schema_node.key() + # if isinstance(k, str): + # key_names = [kn for kn in k.split() if kn] + + if not key_names: + # fallback: use the local list name as the single key + key_names = [local_name] + + # build predicate(s) + preds = [] + for idx, kn in enumerate(key_names): + kv = values[idx] if idx < len(values) else values[0] + preds.append(f'[{kn}="{kv}"]') + + out_parts.append(name + ''.join(preds)) + else: + local_part = part.split(':', 1)[1] if ':' in part else part + schema_path = schema_path + '/' 
+ local_part if schema_path else '/' + local_part + out_parts.append(part) + + return '/' + '/'.join(out_parts) diff --git a/src/common/tools/rest_conf/server/restconf_server/YangModelDiscoverer.py b/src/common/tools/rest_conf/server/restconf_server/YangModelDiscoverer.py new file mode 100644 index 000000000..f31305280 --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/YangModelDiscoverer.py @@ -0,0 +1,195 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, re +from collections import defaultdict +from graphlib import TopologicalSorter, CycleError +from pathlib import Path +from typing import Dict, List, Optional, Set, Tuple + + +COMMENT_SINGLE_RE = re.compile(r"//.*?$", re.MULTILINE) +COMMENT_MULTI_RE = re.compile(r"/\*.*?\*/", re.DOTALL) + +# module / submodule name +MODNAME_RE = re.compile(r"\b(module|submodule)\s+([A-Za-z0-9_.-]+)\s*\{") + +# import foo { ... } (most common form) +IMPORT_BLOCK_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*\{", re.IGNORECASE) + +# import foo; (very rare, but we’ll support it) +IMPORT_SEMI_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*;", re.IGNORECASE) + + +def _parse_yang_file(path: Path) -> Tuple[Optional[str], Set[str]]: + path_stem = path.stem # file name without extension + expected_module_name = path_stem.split('@', 1)[0] + + try: + data = path.read_text(encoding='utf-8', errors='ignore') + except Exception: + data = path.read_bytes().decode('utf-8', errors='ignore') + + data = COMMENT_MULTI_RE.sub('', data) + data = COMMENT_SINGLE_RE.sub('', data) + + match = MODNAME_RE.search(data) + if match is None: + return None, set() + module_name = match.group(2) + if module_name != expected_module_name: + MSG = 'Module({:s}) mismatches its FileName({:s})' + raise Exception(MSG.format(str(module_name), str(expected_module_name))) + + module_imports = set() + if module_name is not None: + module_imports.update(IMPORT_BLOCK_RE.findall(data)) + module_imports.update(IMPORT_SEMI_RE.findall(data)) + + # ignore modules importing themselves, just in case + module_imports.discard(module_name) + + return module_name, module_imports + + +class YangModuleDiscoverer: + def __init__(self, yang_search_path : str) -> None: + self._yang_search_path = yang_search_path + + self._module_to_paths : Dict[str, List[Path]] = defaultdict(list) + self._module_to_imports : Dict[str, Set[str]] = defaultdict(set) + self._ordered_module_names : Optional[List[str]] = None + + + def run( + self, do_print_order : bool = False, do_log_order : bool = False, + logger : Optional[logging.Logger] = None, level : int = logging.INFO + ) -> List[str]: + if self._ordered_module_names is None: + self._scan_modules() + self._sort_modules() + + if do_print_order: + self.print_order() + + if do_log_order: + if logger is None: logger = logging.getLogger(__name__) + self.log_order(logger, level=level) + + return self._ordered_module_names + + def _scan_modules(self) -> None: + yang_root = 
Path(self._yang_search_path).resolve() + if not yang_root.exists(): + MSG = 'Path({:s}) not found' + raise Exception(MSG.format(str(self._yang_search_path))) + + for yang_path in yang_root.rglob('*.yang'): + module_name, module_imports = _parse_yang_file(yang_path) + if module_name is None: continue + self._module_to_paths[module_name].append(yang_path) + self._module_to_imports[module_name] = module_imports + + if len(self._module_to_paths) == 0: + MSG = 'No modules found in Path({:s})' + raise Exception(MSG.format(str(self._yang_search_path))) + + self._check_duplicated_module_declaration() + self._check_missing_modules() + + + def _check_duplicated_module_declaration(self) -> None: + duplicate_module_declarations : List[str] = list() + for module_name, paths in self._module_to_paths.items(): + if len(paths) == 1: continue + str_paths = [str(p) for p in paths] + duplicate_module_declarations.append( + ' {:s} => {:s}'.format(module_name, str_paths) + ) + + if len(duplicate_module_declarations) > 0: + MSG = 'Duplicate module declarations:\n{:s}' + str_dup_mods = '\n'.join(duplicate_module_declarations) + raise Exception(MSG.format(str_dup_mods)) + + + def _check_missing_modules(self) -> None: + local_module_names = set(self._module_to_imports.keys()) + missing_modules : List[str] = list() + for module_name, imported_modules in self._module_to_imports.items(): + missing = imported_modules.difference(local_module_names) + if len(missing) == 0: continue + missing_modules.append( + ' {:s} => {:s}'.format(module_name, str(missing)) + ) + + if len(missing_modules) > 0: + MSG = 'Missing modules:\n{:s}' + str_mis_mods = '\n'.join(missing_modules) + raise Exception(MSG.format(str_mis_mods)) + + + def _sort_modules(self) -> None: + ts = TopologicalSorter() + for module_name, imported_modules in self._module_to_imports.items(): + ts.add(module_name, *imported_modules) + + try: + self._ordered_module_names = list(ts.static_order()) # raises CycleError on cycles + except CycleError as e: + cycle = list(dict.fromkeys(e.args[1])) # de-dup while preserving order + MSG = 'Circular dependencies between modules: {:s}' + raise Exception(MSG.format(str(cycle))) # pylint: disable=raise-missing-from + + + def dump_order(self) -> List[Tuple[int, str, List[str]]]: + if self._ordered_module_names is None: + raise Exception('First process the YANG Modules running method .run()') + + module_order : List[Tuple[int, str, List[str]]] = list() + for i, module_name in enumerate(self._ordered_module_names, 1): + module_imports = sorted(self._module_to_imports[module_name]) + module_order.append((i, module_name, module_imports)) + + return module_order + + + def print_order(self) -> None: + print('Ordered Modules:') + for i, module_name, module_imports in self.dump_order(): + MSG = '{:2d} : {:s} => {:s}' + print(MSG.format(i, module_name, str(module_imports))) + + + def log_order(self, logger : logging.Logger, level : int = logging.INFO) -> None: + logger.log(level, 'Ordered Modules:') + for i, module_name, module_imports in self.dump_order(): + MSG = '{:2d} : {:s} => {:s}' + logger.log(level, MSG.format(i, module_name, str(module_imports))) + + +def main() -> None: + logging.basicConfig(level=logging.INFO) + + ymd = YangModuleDiscoverer('./yang') + ordered_module_names = ymd.run( + do_print_order=True, + do_log_order=True + ) + print('ordered_module_names', ordered_module_names) + + +if __name__ == '__main__': + main() diff --git a/src/common/tools/rest_conf/server/restconf_server/__init__.py 
b/src/common/tools/rest_conf/server/restconf_server/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/simap_server/simap_server/__main__.py b/src/common/tools/rest_conf/server/restconf_server/__main__.py similarity index 100% rename from src/tests/tools/simap_server/simap_server/__main__.py rename to src/common/tools/rest_conf/server/restconf_server/__main__.py diff --git a/src/tests/tools/simap_server/simap_server/app.py b/src/common/tools/rest_conf/server/restconf_server/app.py similarity index 55% rename from src/tests/tools/simap_server/simap_server/app.py rename to src/common/tools/rest_conf/server/restconf_server/app.py index 3e9f815e5..9ecca3110 100644 --- a/src/tests/tools/simap_server/simap_server/app.py +++ b/src/common/tools/rest_conf/server/restconf_server/app.py @@ -13,34 +13,49 @@ # limitations under the License. -import json, logging, secrets -from flask import Flask +import json, logging, os, secrets, time +from flask import Flask, request from flask_restful import Api from .Dispatch import RestConfDispatch from .HostMeta import HostMeta from .YangHandler import YangHandler +from .YangModelDiscoverer import YangModuleDiscoverer + + +RESTCONF_PREFIX = os.environ.get('RESTCONF_PREFIX', '/restconf' ) +YANG_SEARCH_PATH = os.environ.get('YANG_SEARCH_PATH', './yang' ) +STARTUP_FILE = os.environ.get('STARTUP_FILE', './startup.json') +SECRET_KEY = os.environ.get('SECRET_KEY', secrets.token_hex(64)) logging.basicConfig( level=logging.INFO, - format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", + format='[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s', ) LOGGER = logging.getLogger(__name__) -RESTCONF_PREFIX = '/restconf' -SECRET_KEY = secrets.token_hex(64) +def log_request(response): + timestamp = time.strftime('[%Y-%b-%d %H:%M]') + LOGGER.info( + '%s %s %s %s %s', timestamp, request.remote_addr, request.method, + request.full_path, response.status + ) + return response + + +ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) +YANG_MODULE_NAMES = ymd.run(do_log_order=True) + +with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: + YANG_STARTUP_DATA = json.loads(fp.read()) + + +ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) +YANG_MODULE_NAMES = ymd.run(do_log_order=True) -YANG_SEARCH_PATH = './yang' -YANG_MODULE_NAMES = [ - 'ietf-inet-types', - 'simap-telemetry', - 'ietf-network-topology', - 'ietf-network', -] -STARTUP_FILE = './startup.json' with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: YANG_STARTUP_DATA = json.loads(fp.read()) @@ -48,10 +63,11 @@ with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: yang_handler = YangHandler( YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA ) -restconf_paths = yang_handler.get_module_paths() +restconf_paths = 
yang_handler.get_schema_paths() app = Flask(__name__) app.config['SECRET_KEY'] = SECRET_KEY +app.after_request(log_request) api = Api(app) api.add_resource( @@ -64,9 +80,10 @@ api.add_resource( RESTCONF_PREFIX + '/data', RESTCONF_PREFIX + '/data/', RESTCONF_PREFIX + '/data/', + RESTCONF_PREFIX + '/data//', resource_class_args=(yang_handler,) ) LOGGER.info('Available RESTCONF paths:') -for restconf_path in restconf_paths: +for restconf_path in sorted(restconf_paths): LOGGER.info('- {:s}'.format(str(restconf_path))) diff --git a/src/common/tools/rest_conf/server/run_server_gunicorn.sh b/src/common/tools/rest_conf/server/run_server_gunicorn.sh new file mode 100755 index 000000000..af7a1c8e0 --- /dev/null +++ b/src/common/tools/rest_conf/server/run_server_gunicorn.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +export FLASK_ENV=development +gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG restconf_server.app:app diff --git a/src/common/tools/rest_conf/server/run_server_standalone.sh b/src/common/tools/rest_conf/server/run_server_standalone.sh new file mode 100755 index 000000000..4ce7966dd --- /dev/null +++ b/src/common/tools/rest_conf/server/run_server_standalone.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + +python -m restconf_server diff --git a/src/common/tools/rest_conf/server/startup.json b/src/common/tools/rest_conf/server/startup.json new file mode 100644 index 000000000..0967ef424 --- /dev/null +++ b/src/common/tools/rest_conf/server/startup.json @@ -0,0 +1 @@ +{} diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index 12064c3e8..11f2a69bf 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -15,7 +15,7 @@ import copy, json, logging, requests, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_string, chk_type from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from .handlers.EthtServiceHandler import EthtServiceHandler diff --git a/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py b/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py index 7c9e4b89c..00c9b664e 100644 --- a/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/EthtServiceHandler.py @@ -14,7 +14,7 @@ import enum, logging from typing import Dict, List, Optional, Tuple, Union -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient LOGGER = logging.getLogger(__name__) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index 056a8e39f..ee7efdf7a 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -20,7 +20,7 @@ from common.proto.context_pb2 import ( DEVICEDRIVER_UNDEFINED, DEVICEOPERATIONALSTATUS_DISABLED, DEVICEOPERATIONALSTATUS_ENABLED ) -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient from device.service.driver_api.ImportTopologyEnum import ( ImportTopologyEnum, get_import_topology ) diff --git a/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py b/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py index a43bac28c..5abc009b4 100644 --- a/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/OsuTunnelHandler.py @@ -14,7 +14,7 @@ import enum, logging from typing import Dict, List, Optional, Union -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient LOGGER = logging.getLogger(__name__) diff --git a/src/device/service/drivers/nce/NCEDriver.py b/src/device/service/drivers/nce/NCEDriver.py index 7eff38c75..9ff0125f5 100644 --- a/src/device/service/drivers/nce/NCEDriver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -15,7 +15,7 @@ import anytree, copy, json, logging, re, requests, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import 
MetricsPool, metered_subclass_method -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_length, chk_string, chk_type from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from device.service.driver_api.AnyTreeTools import ( diff --git a/src/device/service/drivers/nce/handlers/AppFlowHandler.py b/src/device/service/drivers/nce/handlers/AppFlowHandler.py index 1de9bd368..7e8082abe 100644 --- a/src/device/service/drivers/nce/handlers/AppFlowHandler.py +++ b/src/device/service/drivers/nce/handlers/AppFlowHandler.py @@ -14,7 +14,7 @@ import logging, requests from typing import Dict -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient LOGGER = logging.getLogger(__name__) diff --git a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py index 124ce4024..15c87a19d 100644 --- a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py @@ -20,7 +20,7 @@ from common.proto.context_pb2 import ( DEVICEDRIVER_UNDEFINED, DEVICEOPERATIONALSTATUS_DISABLED, DEVICEOPERATIONALSTATUS_ENABLED ) -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient from device.service.driver_api.ImportTopologyEnum import ( ImportTopologyEnum, get_import_topology ) diff --git a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py index 00c13d637..78c553b65 100644 --- a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py @@ -14,7 +14,7 @@ import logging, requests from typing_extensions import TypedDict -from common.tools.client.RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient LOGGER = logging.getLogger(__name__) diff --git a/src/tests/tools/simap_server/simap_client/RestConfClient.py b/src/tests/tools/simap_server/simap_client/RestConfClient.py deleted file mode 100644 index b7c057a70..000000000 --- a/src/tests/tools/simap_server/simap_client/RestConfClient.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import enum, logging, requests -from requests.auth import HTTPBasicAuth -from typing import Any, Dict, Optional, Set - -class RestRequestMethod(enum.Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - PATCH = 'patch' - DELETE = 'delete' - -EXPECTED_STATUS_CODES : Set[int] = { - requests.codes['OK' ], # 200 - OK - requests.codes['CREATED' ], # 201 - Created - requests.codes['ACCEPTED' ], # 202 - Accepted - requests.codes['NO_CONTENT'], # 204 - No Content -} - -def compose_basic_auth( - username : Optional[str] = None, password : Optional[str] = None -) -> Optional[HTTPBasicAuth]: - if username is None or password is None: return None - return HTTPBasicAuth(username, password) - -class SchemeEnum(enum.Enum): - HTTP = 'http' - HTTPS = 'https' - -def check_scheme(scheme : str) -> str: - str_scheme = str(scheme).lower() - enm_scheme = SchemeEnum._value2member_map_[str_scheme] - return enm_scheme.value - -HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' -RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' - -class RestConfClient: - def __init__( - self, address : str, port : int = 8080, scheme : str = 'http', - username : Optional[str] = None, password : Optional[str] = None, - timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, - logger : Optional[logging.Logger] = None - ) -> None: - self._address = address - self._port = int(port) - self._scheme = check_scheme(scheme) - self._auth = compose_basic_auth(username=username, password=password) - self._base_url = '' - self._timeout = int(timeout) - self._verify_certs = verify_certs - self._allow_redirects = allow_redirects - self._logger = logger - - self._discover_base_url() - - def _discover_base_url(self) -> None: - host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) - host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) - - links = host_meta.get('links') - if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') - if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') - if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') - - link = links[0] - if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') - - rel = link.get('rel') - if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') - if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') - if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') - - href = link.get('href') - if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') - if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') - - self._base_url = str(href + '/data').replace('//', '/') - - def _log_msg_request( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - log_level : int = logging.INFO - ) -> str: - msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) - if body is not None: msg += ' body={:s}'.format(str(body)) - if self._logger is not None: self._logger.log(log_level, msg) - return msg - - def _log_msg_check_reply( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - reply : requests.Response, expected_status_codes : Set[int], - log_level : int = logging.INFO - ) -> str: - msg = 'Reply: {:s}'.format(str(reply.text)) - if self._logger is not None: 
self._logger.log(log_level, msg) - http_status_code = reply.status_code - if http_status_code in expected_status_codes: return msg - MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' - msg = MSG.format( - str(method.value).upper(), str(request_url), str(body), - str(http_status_code), str(reply.text) - ) - self._logger.error(msg) - raise Exception(msg) - - def _do_rest_request( - self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = EXPECTED_STATUS_CODES - ) -> Optional[Any]: - candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) - if endpoint.lower().startswith(candidate_schemes): - request_url = endpoint.lstrip('/') - else: - endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') - request_url = '{:s}://{:s}:{:d}/{:s}'.format( - self._scheme, self._address, self._port, endpoint.lstrip('/') - ) - self._log_msg_request(method, request_url, body) - try: - headers = {'accept': 'application/json'} - reply = requests.request( - method.value, request_url, headers=headers, json=body, - auth=self._auth, verify=self._verify_certs, timeout=self._timeout, - allow_redirects=self._allow_redirects - ) - except Exception as e: - MSG = 'Request failed. method={:s} url={:s} body={:s}' - msg = MSG.format(str(method.value).upper(), request_url, str(body)) - self._logger.exception(msg) - raise Exception(msg) from e - self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) - if reply.content and len(reply.content) > 0: return reply.json() - return None - - def get( - self, endpoint : str, - expected_status_codes : Set[int] = {requests.codes['OK']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.GET, endpoint, - expected_status_codes=expected_status_codes - ) - - def post( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.POST, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def put( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PUT, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def patch( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PATCH, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def delete( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.DELETE, endpoint, body=body, - expected_status_codes=expected_status_codes - ) diff --git a/src/tests/tools/simap_server/simap_server/YangHandler.py b/src/tests/tools/simap_server/simap_server/YangHandler.py deleted file mode 100644 index 8745c0b5e..000000000 --- a/src/tests/tools/simap_server/simap_server/YangHandler.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json, libyang, logging -from typing import Dict, List, Optional, Type - -LOGGER = logging.getLogger(__name__) - -def walk_schema(node : libyang.SNode, path : str = '') -> Dict[str, Type]: - schema_paths : Dict[str, Type] = dict() - current_path = f'{path}/{node.name()}' - schema_paths[current_path] = type(node) - for child in node.children(): - if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue - schema_paths.update(walk_schema(child, current_path)) - return schema_paths - -def extract_schema_paths(yang_module : libyang.Module) -> Dict[str, Type]: - schema_paths : Dict[str, Type] = dict() - for node in yang_module.children(): - schema_paths.update(walk_schema(node)) - return schema_paths - -class YangHandler: - def __init__( - self, yang_search_path : str, yang_module_names : List[str], - yang_startup_data : Dict - ) -> None: - self._yang_context = libyang.Context(yang_search_path) - self._loaded_modules = set() - self._yang_module_paths : Dict[str, Type] = dict() - for yang_module_name in yang_module_names: - LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) - yang_module = self._yang_context.load_module(yang_module_name) - yang_module.feature_enable_all() - self._loaded_modules.add(yang_module_name) - self._yang_module_paths.update(extract_schema_paths(yang_module)) - - self._datastore = self._yang_context.parse_data_mem( - json.dumps(yang_startup_data), fmt='json' - ) - - def destroy(self) -> None: - self._yang_context.destroy() - - def get_module_paths(self) -> Dict[str, Type]: - return self._yang_module_paths - - def get(self, path : str) -> Optional[str]: - if not path.startswith('/'): path = '/' + path - data = self._datastore.find_path(path) - if data is None: return None - json_data = data.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def get_xpath(self, xpath : str) -> List[str]: - if not path.startswith('/'): path = '/' + path - nodes = self._datastore.find_all(xpath) - result = list() - for node in nodes: - result.append(node.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - )) - return result - - def create(self, path : str, payload : Dict) -> str: - if not path.startswith('/'): path = '/' + path - # TODO: client should not provide identifier of element to be created, add it to subpath - dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( - json.dumps(payload), 'json', strict=True, parse_only=False, - validate_present=True, validate_multi_error=True - ) - if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) - - dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( - path, parent=self._datastore, value=dnode_parsed, update=False - ) - self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) - - json_data = dnode.print_mem( - fmt='json', with_siblings=True, pretty=True, - 
keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def update(self, path : str, payload : Dict) -> str: - if not path.startswith('/'): path = '/' + path - # NOTE: client should provide identifier of element to be updated - dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( - json.dumps(payload), 'json', strict=True, parse_only=False, - validate_present=True, validate_multi_error=True - ) - if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - #LOGGER.info('parsed = {:s}'.format(json.dumps(dnode.print_dict()))) - - dnode = self._yang_context.create_data_path( - path, parent=self._datastore, value=dnode_parsed, update=True - ) - self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) - - json_data = dnode.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def delete(self, path : str) -> Optional[str]: - if not path.startswith('/'): path = '/' + path - - # NOTE: client should provide identifier of element to be deleted - - node : libyang.DNode = self._datastore.find_path(path) - if node is None: return None - - LOGGER.info('node = {:s}'.format(str(node))) - json_data = str(node.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - )) - LOGGER.info('json_data = {:s}'.format(json_data)) - - node.unlink() - node.free() - - return json_data -- GitLab From ff5a7ad6be6de103faa4a404418500396558a660 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 11:34:19 +0000 Subject: [PATCH 170/367] Tests - Tools - SIMAP Server - Updated to use the generic RESTCONF Client and Server --- src/tests/tools/simap_server/.gitlab-ci.yml | 2 +- src/tests/tools/simap_server/Dockerfile | 15 +++++++---- src/tests/tools/simap_server/build.sh | 4 +-- src/tests/tools/simap_server/deploy.sh | 5 ++++ src/tests/tools/simap_server/destroy.sh | 3 +++ src/tests/tools/simap_server/requirements.in | 25 ------------------- src/tests/tools/simap_server/run_client.sh | 4 +-- .../tools/simap_server/run_server_gunicorn.sh | 20 --------------- .../simap_server/run_server_standalone.sh | 19 -------------- .../simap_server/simap_client/SimapClient.py | 16 ++++++------ .../simap_server/simap_client/__main__.py | 2 +- 11 files changed, 32 insertions(+), 83 deletions(-) delete mode 100644 src/tests/tools/simap_server/requirements.in delete mode 100755 src/tests/tools/simap_server/run_server_gunicorn.sh delete mode 100755 src/tests/tools/simap_server/run_server_standalone.sh diff --git a/src/tests/tools/simap_server/.gitlab-ci.yml b/src/tests/tools/simap_server/.gitlab-ci.yml index 2e1652765..30c79a50a 100644 --- a/src/tests/tools/simap_server/.gitlab-ci.yml +++ b/src/tests/tools/simap_server/.gitlab-ci.yml @@ -19,7 +19,7 @@ build simap_server: - docker image prune --force - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY script: - - docker buildx build -t "$CI_REGISTRY_IMAGE/simap-server:test" -f ./src/tests/tools/simap_server/Dockerfile ./src/tests/tools/simap_server + - docker buildx build -t "$CI_REGISTRY_IMAGE/simap-server:test" -f ./src/tests/tools/simap_server/Dockerfile . 
- docker push "$CI_REGISTRY_IMAGE/simap-server:test" after_script: - docker image prune --force diff --git a/src/tests/tools/simap_server/Dockerfile b/src/tests/tools/simap_server/Dockerfile index 973d56a8f..f47de61a8 100644 --- a/src/tests/tools/simap_server/Dockerfile +++ b/src/tests/tools/simap_server/Dockerfile @@ -45,17 +45,22 @@ RUN python3 -m pip install --upgrade pip-tools # Create component sub-folders, get specific Python packages RUN mkdir -p /var/teraflow/simap_server/ WORKDIR /var/teraflow/simap_server/ -COPY ./requirements.in ./requirements.in +COPY src/common/tools/rest_conf/server/requirements.in ./requirements.in RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt # Add component files into working directory -COPY ./yang/*.yang ./yang/ -COPY ./simap_server/*.py ./simap_server/ -COPY ./startup.json ./startup.json +COPY src/common/tools/rest_conf/server/restconf_server/ ./simap_server/ +COPY src/tests/tools/simap_server/yang/*.yang ./yang/ +COPY src/tests/tools/simap_server/startup.json ./startup.json + +# Configure RESTCONF Server +ENV RESTCONF_PREFIX="/restconf" +ENV YANG_SEARCH_PATH="./yang" +ENV STARTUP_FILE="./startup.json" # Configure Flask for production -ENV FLASK_ENV=production +ENV FLASK_ENV="production" # Start the service ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "simap_server.app:app"] diff --git a/src/tests/tools/simap_server/build.sh b/src/tests/tools/simap_server/build.sh index 033570f49..949cc7933 100755 --- a/src/tests/tools/simap_server/build.sh +++ b/src/tests/tools/simap_server/build.sh @@ -14,8 +14,8 @@ # limitations under the License. # Make folder containing the script the root folder for its execution -cd $(dirname $0) +cd $(dirname $0)/../../../../ -docker buildx build -t simap-server:test -f Dockerfile . +docker buildx build -t simap-server:test -f ./src/tests/tools/simap_server/Dockerfile . #docker tag simap-server:test localhost:32000/tfs/simap-server:test #docker push localhost:32000/tfs/simap-server:test diff --git a/src/tests/tools/simap_server/deploy.sh b/src/tests/tools/simap_server/deploy.sh index 71bbeb041..854ee6577 100755 --- a/src/tests/tools/simap_server/deploy.sh +++ b/src/tests/tools/simap_server/deploy.sh @@ -13,15 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. + # Cleanup docker rm --force simap-server + # Create SIMAP Server docker run --detach --name simap-server --publish 8080:8080 simap-server:test + sleep 2 + # Dump SIMAP Server Docker container docker ps -a + echo "Bye!" diff --git a/src/tests/tools/simap_server/destroy.sh b/src/tests/tools/simap_server/destroy.sh index 54345573f..51edb6bca 100755 --- a/src/tests/tools/simap_server/destroy.sh +++ b/src/tests/tools/simap_server/destroy.sh @@ -13,10 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. + # Cleanup docker rm --force simap-server + # Dump Docker containers docker ps -a + echo "Bye!" 
diff --git a/src/tests/tools/simap_server/requirements.in b/src/tests/tools/simap_server/requirements.in deleted file mode 100644 index 17155ed58..000000000 --- a/src/tests/tools/simap_server/requirements.in +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -cryptography==39.0.1 -eventlet==0.39.0 -Flask-HTTPAuth==4.5.0 -Flask-RESTful==0.3.9 -Flask==2.1.3 -gunicorn==23.0.0 -jsonschema==4.4.0 -libyang==2.8.4 -pyopenssl==23.0.0 -requests==2.27.1 -werkzeug==2.3.7 diff --git a/src/tests/tools/simap_server/run_client.sh b/src/tests/tools/simap_server/run_client.sh index 518deb462..76aced855 100755 --- a/src/tests/tools/simap_server/run_client.sh +++ b/src/tests/tools/simap_server/run_client.sh @@ -14,6 +14,6 @@ # limitations under the License. # Make folder containing the script the root folder for its execution -cd $(dirname $0) +cd $(dirname $0)/../../../ -python -m simap_client +python -m tests.tools.simap_server.simap_client diff --git a/src/tests/tools/simap_server/run_server_gunicorn.sh b/src/tests/tools/simap_server/run_server_gunicorn.sh deleted file mode 100755 index be9c62fcb..000000000 --- a/src/tests/tools/simap_server/run_server_gunicorn.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - -export FLASK_ENV=development -gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG simap_server.app:app diff --git a/src/tests/tools/simap_server/run_server_standalone.sh b/src/tests/tools/simap_server/run_server_standalone.sh deleted file mode 100755 index d2580f41d..000000000 --- a/src/tests/tools/simap_server/run_server_standalone.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - -python -m simap_server diff --git a/src/tests/tools/simap_server/simap_client/SimapClient.py b/src/tests/tools/simap_server/simap_client/SimapClient.py index b4c27d43a..8f457d452 100644 --- a/src/tests/tools/simap_server/simap_client/SimapClient.py +++ b/src/tests/tools/simap_server/simap_client/SimapClient.py @@ -14,12 +14,12 @@ from typing import Dict, List, Tuple -from .RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient class TerminationPoint: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}/node={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): self._restconf_client = restconf_client @@ -63,8 +63,8 @@ class TerminationPoint: self._restconf_client.delete(endpoint) class Node: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -120,8 +120,8 @@ class Node: self._restconf_client.delete(endpoint) class Link: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client @@ -172,7 +172,7 @@ class Link: class Network: ENDPOINT_NO_ID = '/ietf-network:networks' - ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str): self._restconf_client = restconf_client diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py index 77f79aa6c..40dcc8847 100644 --- a/src/tests/tools/simap_server/simap_client/__main__.py +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -14,7 +14,7 @@ import json, logging -from .RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient from .SimapClient import SimapClient logging.basicConfig(level=logging.INFO) -- GitLab From 433346ff3119d9e52e2806a402cb5e5a9d2286e1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 12:29:15 +0000 Subject: [PATCH 171/367] Common - Tools - Rest Conf - Packed code into RestConfServerApp class - Separated Config settings to Config.py --- .../server/restconf_server/Config.py | 22 +++++ .../RestConfServerApplication.py | 95 +++++++++++++++++++ .../rest_conf/server/restconf_server/app.py | 72 ++------------ 3 files changed, 125 insertions(+), 64 deletions(-) create mode 100644 src/common/tools/rest_conf/server/restconf_server/Config.py create mode 100644 
src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py diff --git a/src/common/tools/rest_conf/server/restconf_server/Config.py b/src/common/tools/rest_conf/server/restconf_server/Config.py new file mode 100644 index 000000000..f0a47aac5 --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/Config.py @@ -0,0 +1,22 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os, secrets + + +RESTCONF_PREFIX = os.environ.get('RESTCONF_PREFIX', '/restconf' ) +YANG_SEARCH_PATH = os.environ.get('YANG_SEARCH_PATH', './yang' ) +STARTUP_FILE = os.environ.get('STARTUP_FILE', './startup.json') +SECRET_KEY = os.environ.get('SECRET_KEY', secrets.token_hex(64)) diff --git a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py new file mode 100644 index 000000000..1ed5bdc28 --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py @@ -0,0 +1,95 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import json, logging, time +from flask import Flask, request +from flask_restful import Api +from .Config import RESTCONF_PREFIX, SECRET_KEY, STARTUP_FILE, YANG_SEARCH_PATH +from .Dispatch import RestConfDispatch +from .HostMeta import HostMeta +from .YangHandler import YangHandler +from .YangModelDiscoverer import YangModuleDiscoverer + + +logging.basicConfig( + level=logging.INFO, + format='[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s', +) + + +LOGGER = logging.getLogger(__name__) + + +def log_request(response): + timestamp = time.strftime('[%Y-%b-%d %H:%M]') + LOGGER.info( + '%s %s %s %s %s', timestamp, request.remote_addr, request.method, + request.full_path, response.status + ) + return response + + +class RestConfServerApplication: + def __init__(self) -> None: + self._ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) + self._yang_module_names = self._ymd.run(do_log_order=True) + + with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: + self._yang_startup_data = json.loads(fp.read()) + + self._yang_handler = YangHandler( + YANG_SEARCH_PATH, self._yang_module_names, self._yang_startup_data + ) + + self._app = Flask(__name__) + self._app.config['SECRET_KEY'] = SECRET_KEY + self._app.after_request(log_request) + self._api = Api(self._app, prefix=RESTCONF_PREFIX) + + def get_startup_data(self) -> None: + return self._yang_startup_data + + def register_host_meta(self) -> None: + self._api.add_resource( + HostMeta, + '/.well-known/host-meta', + resource_class_args=(RESTCONF_PREFIX,) + ) + + def register_restconf(self) -> None: + self._api.add_resource( + RestConfDispatch, + '/data', + '/data/', + '/data/', + '/data//', + resource_class_args=(self._yang_handler,) + ) + + def register_endpoints(self) -> None: + self.register_host_meta() + self.register_restconf() + + def get_flask_app(self) -> Flask: + return self._app + + def get_flask_api(self) -> Api: + return self._api + + def dump_configuration(self) -> None: + LOGGER.info('Available RESTCONF paths:') + restconf_paths = self._yang_handler.get_schema_paths() + for restconf_path in sorted(restconf_paths): + LOGGER.info('- {:s}'.format(str(restconf_path))) diff --git a/src/common/tools/rest_conf/server/restconf_server/app.py b/src/common/tools/rest_conf/server/restconf_server/app.py index 9ecca3110..4ca09328b 100644 --- a/src/common/tools/rest_conf/server/restconf_server/app.py +++ b/src/common/tools/rest_conf/server/restconf_server/app.py @@ -13,19 +13,8 @@ # limitations under the License. 
-import json, logging, os, secrets, time -from flask import Flask, request -from flask_restful import Api -from .Dispatch import RestConfDispatch -from .HostMeta import HostMeta -from .YangHandler import YangHandler -from .YangModelDiscoverer import YangModuleDiscoverer - - -RESTCONF_PREFIX = os.environ.get('RESTCONF_PREFIX', '/restconf' ) -YANG_SEARCH_PATH = os.environ.get('YANG_SEARCH_PATH', './yang' ) -STARTUP_FILE = os.environ.get('STARTUP_FILE', './startup.json') -SECRET_KEY = os.environ.get('SECRET_KEY', secrets.token_hex(64)) +import logging +from .RestConfServerApplication import RestConfServerApplication logging.basicConfig( @@ -34,56 +23,11 @@ logging.basicConfig( ) LOGGER = logging.getLogger(__name__) +LOGGER.info('Starting...') +rcs_app = RestConfServerApplication() +LOGGER.info('All connectors registered') -def log_request(response): - timestamp = time.strftime('[%Y-%b-%d %H:%M]') - LOGGER.info( - '%s %s %s %s %s', timestamp, request.remote_addr, request.method, - request.full_path, response.status - ) - return response - - -ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) -YANG_MODULE_NAMES = ymd.run(do_log_order=True) - -with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: - YANG_STARTUP_DATA = json.loads(fp.read()) - - - -ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) -YANG_MODULE_NAMES = ymd.run(do_log_order=True) - - -with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: - YANG_STARTUP_DATA = json.loads(fp.read()) - - -yang_handler = YangHandler( - YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA -) -restconf_paths = yang_handler.get_schema_paths() - -app = Flask(__name__) -app.config['SECRET_KEY'] = SECRET_KEY -app.after_request(log_request) - -api = Api(app) -api.add_resource( - HostMeta, - '/.well-known/host-meta', - resource_class_args=(RESTCONF_PREFIX,) -) -api.add_resource( - RestConfDispatch, - RESTCONF_PREFIX + '/data', - RESTCONF_PREFIX + '/data/', - RESTCONF_PREFIX + '/data/', - RESTCONF_PREFIX + '/data//', - resource_class_args=(yang_handler,) -) +rcs_app.dump_configuration() +app = rcs_app.get_flask_app() -LOGGER.info('Available RESTCONF paths:') -for restconf_path in sorted(restconf_paths): - LOGGER.info('- {:s}'.format(str(restconf_path))) +LOGGER.info('Initialization completed!') -- GitLab From 56d59db5b4c3024297572ccd2383ddde89ec9e97 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 12:29:40 +0000 Subject: [PATCH 172/367] Tests - Tools - SIMAP Server - Minor fixes --- src/tests/tools/simap_server/build.sh | 1 + src/tests/tools/simap_server/deploy.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/tests/tools/simap_server/build.sh b/src/tests/tools/simap_server/build.sh index 949cc7933..7ec0e0c91 100755 --- a/src/tests/tools/simap_server/build.sh +++ b/src/tests/tools/simap_server/build.sh @@ -16,6 +16,7 @@ # Make folder containing the script the root folder for its execution cd $(dirname $0)/../../../../ +# Build image SIMAP Server docker buildx build -t simap-server:test -f ./src/tests/tools/simap_server/Dockerfile . 
#docker tag simap-server:test localhost:32000/tfs/simap-server:test #docker push localhost:32000/tfs/simap-server:test diff --git a/src/tests/tools/simap_server/deploy.sh b/src/tests/tools/simap_server/deploy.sh index 854ee6577..a30b4bb1b 100755 --- a/src/tests/tools/simap_server/deploy.sh +++ b/src/tests/tools/simap_server/deploy.sh @@ -25,7 +25,7 @@ docker run --detach --name simap-server --publish 8080:8080 simap-server:test sleep 2 -# Dump SIMAP Server Docker container +# Dump SIMAP Server container docker ps -a -- GitLab From 930544cca3d60b3dd13c0f9893bc006b9a79eddd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 12:33:00 +0000 Subject: [PATCH 173/367] Common - Tools - Rest Conf - Fixed registration of endpoints --- .../restconf_server/RestConfServerApplication.py | 14 +++++--------- .../tools/rest_conf/server/restconf_server/app.py | 2 ++ 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py index 1ed5bdc28..0b5faa497 100644 --- a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py +++ b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py @@ -56,7 +56,7 @@ class RestConfServerApplication: self._app = Flask(__name__) self._app.config['SECRET_KEY'] = SECRET_KEY self._app.after_request(log_request) - self._api = Api(self._app, prefix=RESTCONF_PREFIX) + self._api = Api(self._app) def get_startup_data(self) -> None: return self._yang_startup_data @@ -71,17 +71,13 @@ class RestConfServerApplication: def register_restconf(self) -> None: self._api.add_resource( RestConfDispatch, - '/data', - '/data/', - '/data/', - '/data//', + RESTCONF_PREFIX + '/data', + RESTCONF_PREFIX + '/data/', + RESTCONF_PREFIX + '/data/', + RESTCONF_PREFIX + '/data//', resource_class_args=(self._yang_handler,) ) - def register_endpoints(self) -> None: - self.register_host_meta() - self.register_restconf() - def get_flask_app(self) -> Flask: return self._app diff --git a/src/common/tools/rest_conf/server/restconf_server/app.py b/src/common/tools/rest_conf/server/restconf_server/app.py index 4ca09328b..de35b2524 100644 --- a/src/common/tools/rest_conf/server/restconf_server/app.py +++ b/src/common/tools/rest_conf/server/restconf_server/app.py @@ -25,6 +25,8 @@ LOGGER = logging.getLogger(__name__) LOGGER.info('Starting...') rcs_app = RestConfServerApplication() +rcs_app.register_host_meta() +rcs_app.register_restconf() LOGGER.info('All connectors registered') rcs_app.dump_configuration() -- GitLab From 8a2d7da7b15ae351a0a1a529650ff8e4575bb21e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:05:09 +0000 Subject: [PATCH 174/367] Tests - Tools - Mock NCE-FAN / NCE-T Controllers - Upgraded component to use common RESTConf tools --- src/tests/tools/mock_nce_fan_ctrl/Dockerfile | 18 +- src/tests/tools/mock_nce_fan_ctrl/README.md | 7 +- src/tests/tools/mock_nce_fan_ctrl/build.sh | 4 +- src/tests/tools/mock_nce_fan_ctrl/deploy.sh | 7 +- src/tests/tools/mock_nce_fan_ctrl/destroy.sh | 3 + .../nce_fan_ctrl/Dispatch.py | 148 ------------ .../nce_fan_ctrl/HostMeta.py | 50 ---- .../nce_fan_ctrl/HttpStatusCodesEnum.py | 27 --- .../nce_fan_ctrl}/SimapClient.py | 16 +- .../nce_fan_ctrl/SimapUpdater.py | 64 +++++ .../nce_fan_ctrl/YangHandler.py | 226 ------------------ .../nce_fan_ctrl/YangModelDiscoverer.py | 195 --------------- .../mock_nce_fan_ctrl/nce_fan_ctrl/app.py | 105 ++------ 
.../simap_client/RestConfClient.py | 191 --------------- .../nce_fan_ctrl/simap_client/__init__.py | 14 -- .../tools/mock_nce_fan_ctrl/requirements.in | 25 -- .../mock_nce_fan_ctrl/run_ctrl_gunicorn.sh | 20 -- .../mock_nce_fan_ctrl/run_ctrl_standalone.sh | 19 -- src/tests/tools/mock_nce_t_ctrl/Dockerfile | 18 +- src/tests/tools/mock_nce_t_ctrl/README.md | 6 +- src/tests/tools/mock_nce_t_ctrl/build.sh | 5 +- src/tests/tools/mock_nce_t_ctrl/deploy.sh | 7 +- src/tests/tools/mock_nce_t_ctrl/destroy.sh | 3 + .../mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py | 148 ------------ .../mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py | 50 ---- .../nce_t_ctrl/HttpStatusCodesEnum.py | 27 --- .../nce_t_ctrl}/SimapClient.py | 16 +- .../nce_t_ctrl/SimapUpdater.py | 64 +++++ .../mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py | 226 ------------------ .../nce_t_ctrl/YangModelDiscoverer.py | 195 --------------- .../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 105 ++------ .../nce_t_ctrl/simap_client/RestConfClient.py | 191 --------------- .../nce_t_ctrl/simap_client/__init__.py | 14 -- .../tools/mock_nce_t_ctrl/requirements.in | 25 -- .../mock_nce_t_ctrl/run_ctrl_gunicorn.sh | 20 -- .../mock_nce_t_ctrl/run_ctrl_standalone.sh | 19 -- 36 files changed, 234 insertions(+), 2044 deletions(-) delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py rename src/tests/tools/{mock_nce_t_ctrl/nce_t_ctrl/simap_client => mock_nce_fan_ctrl/nce_fan_ctrl}/SimapClient.py (95%) create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py delete mode 100644 src/tests/tools/mock_nce_fan_ctrl/requirements.in delete mode 100755 src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh delete mode 100755 src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py rename src/tests/tools/{mock_nce_fan_ctrl/nce_fan_ctrl/simap_client => mock_nce_t_ctrl/nce_t_ctrl}/SimapClient.py (95%) create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py delete mode 100644 src/tests/tools/mock_nce_t_ctrl/requirements.in delete mode 100755 src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh delete mode 100755 src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh diff --git a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile index a11f21b62..cae06e98a 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile @@ -45,18 +45,24 @@ RUN python3 -m pip 
install --upgrade pip-tools # Create component sub-folders, get specific Python packages RUN mkdir -p /var/teraflow/nce_fan_ctrl/ WORKDIR /var/teraflow/nce_fan_ctrl/ -COPY ./requirements.in ./requirements.in +COPY src/common/tools/rest_conf/server/requirements.in ./requirements.in RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt # Add component files into working directory -COPY ./yang/. ./yang/ -COPY ./nce_fan_ctrl/*.py ./nce_fan_ctrl/ -COPY ./nce_fan_ctrl/simap_client/*.py ./nce_fan_ctrl/simap_client/ -COPY ./startup.json ./startup.json +COPY src/common/tools/rest_conf/server/restconf_server/ ./nce_fan_ctrl/ +COPY src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/*.py ./nce_fan_ctrl/ +COPY src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/*.py ./nce_fan_ctrl/simap_client/ +COPY src/tests/tools/mock_nce_fan_ctrl/yang/. ./yang/ +COPY src/tests/tools/mock_nce_fan_ctrl/startup.json ./startup.json + +# Configure RESTCONF Server +ENV RESTCONF_PREFIX="/restconf" +ENV YANG_SEARCH_PATH="./yang" +ENV STARTUP_FILE="./startup.json" # Configure Flask for production -ENV FLASK_ENV=production +ENV FLASK_ENV="production" # Start the service ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nce_fan_ctrl.app:app"] diff --git a/src/tests/tools/mock_nce_fan_ctrl/README.md b/src/tests/tools/mock_nce_fan_ctrl/README.md index ff0e2dbe1..6c8716627 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/README.md +++ b/src/tests/tools/mock_nce_fan_ctrl/README.md @@ -5,19 +5,20 @@ In this case, it is prepared to load a NCE-FAN Controller based on: - IETF Network Topology - IETF YANG Data Model for Transport Network Client Signals - IETF YANG Data Model for Traffic Engineering Tunnels, Label Switched Paths and Interfaces +- BBF App-Flow (preview) -## Build the Docker image +## Build the RESTCONF-based NCE-FAN Controller Docker image ```bash ./build.sh ``` -## Deploy the Controller +## Deploy the RESTCONF-based NCE-FAN Controller ```bash ./deploy.sh ``` -## Destroy the Controller +## Destroy the RESTCONF-based NCE-FAN Controller ```bash ./destroy.sh ``` diff --git a/src/tests/tools/mock_nce_fan_ctrl/build.sh b/src/tests/tools/mock_nce_fan_ctrl/build.sh index d4d49c98d..589959435 100755 --- a/src/tests/tools/mock_nce_fan_ctrl/build.sh +++ b/src/tests/tools/mock_nce_fan_ctrl/build.sh @@ -14,9 +14,9 @@ # limitations under the License. # Make folder containing the script the root folder for its execution -cd $(dirname $0) +cd $(dirname $0)/../../../../ # Build image for NCE-FAN Controller -docker buildx build -t nce-fan-ctrl:test -f Dockerfile . +docker buildx build -t nce-fan-ctrl:test -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . #docker tag nce-fan-ctrl:test localhost:32000/tfs/nce-fan-ctrl:test #docker push localhost:32000/tfs/nce-fan-ctrl:test diff --git a/src/tests/tools/mock_nce_fan_ctrl/deploy.sh b/src/tests/tools/mock_nce_fan_ctrl/deploy.sh index 90eef41c5..7e0d56abe 100755 --- a/src/tests/tools/mock_nce_fan_ctrl/deploy.sh +++ b/src/tests/tools/mock_nce_fan_ctrl/deploy.sh @@ -13,15 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. + # Cleanup docker rm --force nce-fan-ctrl + # Create NCE-FAN Controller docker run --detach --name nce-fan-ctrl --publish 8080:8080 nce-fan-ctrl:test + sleep 2 -# Dump Docker containers + +# Dump NCE-FAN Controller container docker ps -a + echo "Bye!" 
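Once `deploy.sh` has brought the container up, the rework above can be exercised through the common RESTCONF entry points. The sketch below is illustrative only and not part of the patch: it assumes the mock NCE-FAN controller is reachable at `localhost:8080` (the port published by `deploy.sh`), that the shared server still serves the `/.well-known/host-meta` discovery document with a single `restconf` link (the behaviour the `RestConfClient` used by `SimapUpdater` relies on), and that the startup datastore exposes `ietf-network:networks`.

```python
# Illustrative smoke test against the deployed mock controller.
# Assumptions (not part of the patch): container published on localhost:8080,
# host-meta served as JSON with a single 'restconf' link, startup data loaded.
import requests

BASE_URL = 'http://localhost:8080'
HEADERS = {'accept': 'application/json'}

# RESTCONF root discovery through the well-known host-meta document
host_meta = requests.get(BASE_URL + '/.well-known/host-meta', headers=HEADERS, timeout=10)
host_meta.raise_for_status()
restconf_root = host_meta.json()['links'][0]['href']   # expected to be '/restconf'

# Read the IETF network topology loaded from startup.json
reply = requests.get(
    BASE_URL + restconf_root + '/data/ietf-network:networks',
    headers=HEADERS, timeout=10
)
print(reply.status_code)
print(reply.json())
```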
diff --git a/src/tests/tools/mock_nce_fan_ctrl/destroy.sh b/src/tests/tools/mock_nce_fan_ctrl/destroy.sh index c6eda7260..64148a70a 100755 --- a/src/tests/tools/mock_nce_fan_ctrl/destroy.sh +++ b/src/tests/tools/mock_nce_fan_ctrl/destroy.sh @@ -13,10 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. + # Cleanup docker rm --force nce-fan-ctrl + # Dump Docker containers docker ps -a + echo "Bye!" diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py deleted file mode 100644 index 319aa9f7b..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Dispatch.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json, logging -from flask import Response, abort, jsonify, request -from flask_restful import Resource -from .HttpStatusCodesEnum import HttpStatusCodesEnum -from .YangHandler import YangHandler - -LOGGER = logging.getLogger(__name__) - -class RestConfDispatch(Resource): - def __init__(self, yang_handler : YangHandler) -> None: - super().__init__() - self._yang_handler = yang_handler - - def get(self, subpath : str = '/') -> Response: - data = self._yang_handler.get(subpath) - if data is None: - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, - description='Path({:s}) not found'.format(str(subpath)) - ) - - LOGGER.info('[GET] {:s} => {:s}'.format(subpath, str(data))) - - response = jsonify(json.loads(data)) - response.status_code = HttpStatusCodesEnum.SUCCESS_OK.value - return response - - def post(self, subpath : str) -> Response: - # TODO: client should not provide identifier of element to be created, add it to subpath - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') - - data = self._yang_handler.get(subpath) - if data is not None: - abort( - HttpStatusCodesEnum.CLI_ERR_CONFLICT.value, - description='Path({:s}) already exists'.format(str(subpath)) - ) - - try: - json_data = self._yang_handler.create(subpath, payload) - except Exception as e: - LOGGER.exception('Create failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - - response = jsonify({'status': 'created'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED.value - return response - - def put(self, subpath : str) -> Response: - # NOTE: client should provide identifier of element to be created/replaced - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') - - try: - json_data = self._yang_handler.update(subpath, payload) - except Exception as e: - 
LOGGER.exception('Update failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - updated = False # TODO: compute if create or update - - response = jsonify({'status': ( - 'updated' if updated else 'created' - )}) - response.status_code = ( - HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value - if updated else - HttpStatusCodesEnum.SUCCESS_CREATED.value - ) - return response - - def patch(self, subpath : str) -> Response: - # NOTE: client should provide identifier of element to be patched - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') - - try: - json_data = self._yang_handler.update(subpath, payload) - except Exception as e: - LOGGER.exception('Update failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - - response = jsonify({'status': 'patched'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value - return response - - def delete(self, subpath : str) -> Response: - # NOTE: client should provide identifier of element to be patched - - try: - deleted_node = self._yang_handler.delete(subpath) - except Exception as e: - LOGGER.exception('Delete failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[DELETE] {:s} => {:s}'.format(subpath, str(deleted_node))) - - if deleted_node is None: - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, - description='Path({:s}) not found'.format(str(subpath)) - ) - - response = jsonify({}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value - return response diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py deleted file mode 100644 index 95ef34b19..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HostMeta.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import xml.etree.ElementTree as ET -from flask import abort, jsonify, make_response, request -from flask_restful import Resource -from .HttpStatusCodesEnum import HttpStatusCodesEnum - -XRD_NS = 'http://docs.oasis-open.org/ns/xri/xrd-1.0' -ET.register_namespace('', XRD_NS) - -class HostMeta(Resource): - def __init__(self, restconf_prefix : str) -> None: - super().__init__() - self._restconf_prefix = restconf_prefix - - def get(self): - best = request.accept_mimetypes.best_match([ - 'application/xrd+xml', 'application/json' - ], default='application/xrd+xml') - - if best == 'application/xrd+xml': - xrd = ET.Element('{{{:s}}}XRD'.format(str(XRD_NS))) - ET.SubElement(xrd, '{{{:s}}}Link'.format(str(XRD_NS)), attrib={ - 'rel': 'restconf', 'href': self._restconf_prefix - }) - xml_string = ET.tostring(xrd, encoding='utf-8', xml_declaration=True).decode() - response = make_response(str(xml_string)) - response.status_code = 200 - response.content_type = best - return response - elif best == 'application/json': - response = jsonify({'links': [{'rel': 'restconf', 'href': self._restconf_prefix}]}) - response.status_code = 200 - response.content_type = best - return response - else: - abort(HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py deleted file mode 100644 index c44d135c0..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/HttpStatusCodesEnum.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import enum - -class HttpStatusCodesEnum(enum.IntEnum): - SUCCESS_OK = 200 - SUCCESS_CREATED = 201 - SUCCESS_ACCEPTED = 202 - SUCCESS_NO_CONTENT = 204 - CLI_ERR_BAD_REQUEST = 400 - CLI_ERR_NOT_FOUND = 404 - CLI_ERR_NOT_ACCEPTABLE = 406 - CLI_ERR_CONFLICT = 409 - SVR_ERR_NOT_IMPLEMENTED = 501 diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py similarity index 95% rename from src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py rename to src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py index b4c27d43a..8f457d452 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/SimapClient.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py @@ -14,12 +14,12 @@ from typing import Dict, List, Tuple -from .RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient class TerminationPoint: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}/node={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): self._restconf_client = restconf_client @@ -63,8 +63,8 @@ class TerminationPoint: self._restconf_client.delete(endpoint) class Node: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -120,8 +120,8 @@ class Node: self._restconf_client.delete(endpoint) class Link: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client @@ -172,7 +172,7 @@ class Link: class Network: ENDPOINT_NO_ID = '/ietf-network:networks' - ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str): self._restconf_client = restconf_client diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py new file mode 100644 index 000000000..4d8cff662 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py @@ -0,0 +1,64 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, os +from typing import Dict +from common.tools.rest_conf.client.RestConfClient import RestConfClient +from .SimapClient import SimapClient + + +SIMAP_ADDRESS = os.environ.get('SIMAP_ADDRESS') +SIMAP_PORT = os.environ.get('SIMAP_PORT' ) + + +class SimapUpdater: + def __init__(self): + if SIMAP_ADDRESS is None: return + if SIMAP_PORT is None: return + + self._restconf_client = RestConfClient( + SIMAP_ADDRESS, port=SIMAP_PORT, + logger=logging.getLogger('RestConfClient') + ) + self._simap_client = SimapClient(self._restconf_client) + + + def upload_topology(self, network_data : Dict) -> None: + network_id = network_data['network-id'] + te_topo = self._simap_client.network(network_id) + te_topo.update() + + nodes = network_data.get('node', list()) + for node in nodes: + node_id = node['node-id'] + tp_ids = [ + tp['tp-id'] + for tp in node['ietf-network-topology:termination-point'] + ] + te_topo.node(node_id).create(termination_point_ids=tp_ids) + + links = network_data.get('ietf-network-topology:link', list()) + for link in links: + link_id = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_node_id = link_src['source-node'] + link_src_tp_id = link_src['source-tp'] + link_dst_node_id = link_dst['dest-node'] + link_dst_tp_id = link_dst['dest-tp'] + + te_topo.link(link_id).create( + link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id + ) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py deleted file mode 100644 index 9df57528f..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangHandler.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import json, libyang, logging -import urllib.parse -from typing import Dict, List, Optional, Set - - -LOGGER = logging.getLogger(__name__) - - -def walk_schema(node : libyang.SNode, path : str = '') -> Set[str]: - current_path = f'{path}/{node.name()}' - schema_paths : Set[str] = {current_path} - for child in node.children(): - if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue - schema_paths.update(walk_schema(child, current_path)) - return schema_paths - -def extract_schema_paths(yang_module : libyang.Module) -> Set[str]: - schema_paths : Set[str] = set() - for node in yang_module.children(): - schema_paths.update(walk_schema(node)) - return schema_paths - -class YangHandler: - def __init__( - self, yang_search_path : str, yang_module_names : List[str], - yang_startup_data : Dict - ) -> None: - self._yang_context = libyang.Context(yang_search_path) - self._loaded_modules : Set[str] = set() - self._schema_paths : Set[str] = set() - for yang_module_name in yang_module_names: - LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) - yang_module = self._yang_context.load_module(yang_module_name) - yang_module.feature_enable_all() - self._loaded_modules.add(yang_module_name) - self._schema_paths.update(extract_schema_paths(yang_module)) - - self._datastore = self._yang_context.parse_data_mem( - json.dumps(yang_startup_data), fmt='json' - ) - - def destroy(self) -> None: - self._yang_context.destroy() - - def get_schema_paths(self) -> Set[str]: - return self._schema_paths - - def get(self, path : str) -> Optional[str]: - path = self._normalize_path(path) - data = self._datastore.find_path(path) - if data is None: return None - json_data = data.print_mem( - fmt='json', with_siblings=False, pretty=True, - keep_empty_containers=False, include_implicit_defaults=True - ) - return json_data - - def get_xpath(self, xpath : str) -> List[str]: - if not xpath.startswith('/'): xpath = '/' + xpath - items = self._datastore.find_all(xpath) - result = list() - for item in items: - result.append(item.print_mem( - fmt='json', with_siblings=False, pretty=True, - keep_empty_containers=False, include_implicit_defaults=True - )) - return result - - def create(self, path : str, payload : Dict) -> str: - path = self._normalize_path(path) - # TODO: client should not provide identifier of element to be created, add it to subpath - dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( - json.dumps(payload), 'json', strict=True, parse_only=False, - validate_present=True, validate_multi_error=True - ) - if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - - dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( - path, parent=self._datastore, value=dnode_parsed, update=False - ) - self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) - - json_data = dnode.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def update(self, path : str, payload : Dict) -> str: - path = self._normalize_path(path) - # NOTE: client should provide identifier of element to be updated - dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( - json.dumps(payload), 'json', strict=True, parse_only=False, - validate_present=True, validate_multi_error=True - ) - if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - - dnode = self._yang_context.create_data_path( - path, 
parent=self._datastore, value=dnode_parsed, update=True - ) - self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) - - json_data = dnode.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def delete(self, path : str) -> Optional[str]: - path = self._normalize_path(path) - - # NOTE: client should provide identifier of element to be deleted - - node : libyang.DNode = self._datastore.find_path(path) - if node is None: return None - - LOGGER.info('node = {:s}'.format(str(node))) - json_data = str(node.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - )) - LOGGER.info('json_data = {:s}'.format(json_data)) - - node.unlink() - node.free() - - return json_data - - def _normalize_path(self, path : str) -> str: - """ - Normalize RESTCONF path segments using the standard `list=` - syntax into the libyang bracketed predicate form expected by - the datastore (e.g. `network="admin"` -> `network[network-id="admin"]`). - - This implementation looks up the schema node for the list and - uses its key leaf names to build the proper predicates. If the - schema information is unavailable, it falls back to using the - list name as the key name. - """ - - # URL-decode each path segment so escaped characters like `%22` - # (double quotes) are properly handled when parsing list keys. - parts = [urllib.parse.unquote(p) for p in path.strip('/').split('/') if p != ''] - schema_path = '' - out_parts: List[str] = [] - - for part in parts: - if '=' in part: - # split into name and value (value may contain commas/quotes) - name, val = part.split('=', 1) - # keep original name (may include prefix) for output, but - # use local name (without module prefix) to lookup schema - local_name = name.split(':', 1)[1] if ':' in name else name - schema_path = schema_path + '/' + local_name if schema_path else '/' + local_name - schema_nodes = list(self._yang_context.find_path(schema_path)) - if len(schema_nodes) != 1: - MSG = 'No/Multiple SchemaNodes({:s}) for SchemaPath({:s})' - raise Exception(MSG.format( - str([repr(sn) for sn in schema_nodes]), schema_path - )) - schema_node = schema_nodes[0] - - # parse values splitting on commas outside quotes - values = [] - cur = '' - in_quotes = False - for ch in val: - if ch == '"': - in_quotes = not in_quotes - cur += ch - elif ch == ',' and not in_quotes: - values.append(cur) - cur = '' - else: - cur += ch - if cur != '': - values.append(cur) - - # determine key names from schema_node if possible - key_names = None - if isinstance(schema_node, libyang.SList): - key_names = [k.name() for k in schema_node.keys()] - #if isinstance(keys, (list, tuple)): - # key_names = keys - #elif isinstance(keys, str): - # key_names = [kn for kn in k.split() if kn] - #else: - # MSG = 'Unsupported keys format: {:s} / {:s}' - # raise Exception(MSG.format(str(type(keys)), str(keys))) - #elif hasattr(schema_node, 'key'): - # k = schema_node.key() - # if isinstance(k, str): - # key_names = [kn for kn in k.split() if kn] - - if not key_names: - # fallback: use the local list name as the single key - key_names = [local_name] - - # build predicate(s) - preds = [] - for idx, kn in enumerate(key_names): - kv = values[idx] if idx < len(values) else values[0] - preds.append(f'[{kn}="{kv}"]') - - out_parts.append(name + ''.join(preds)) - else: - local_part = part.split(':', 1)[1] if ':' in part else part - schema_path = schema_path + '/' 
+ local_part if schema_path else '/' + local_part - out_parts.append(part) - - return '/' + '/'.join(out_parts) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py deleted file mode 100644 index f31305280..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/YangModelDiscoverer.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging, re -from collections import defaultdict -from graphlib import TopologicalSorter, CycleError -from pathlib import Path -from typing import Dict, List, Optional, Set, Tuple - - -COMMENT_SINGLE_RE = re.compile(r"//.*?$", re.MULTILINE) -COMMENT_MULTI_RE = re.compile(r"/\*.*?\*/", re.DOTALL) - -# module / submodule name -MODNAME_RE = re.compile(r"\b(module|submodule)\s+([A-Za-z0-9_.-]+)\s*\{") - -# import foo { ... } (most common form) -IMPORT_BLOCK_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*\{", re.IGNORECASE) - -# import foo; (very rare, but we’ll support it) -IMPORT_SEMI_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*;", re.IGNORECASE) - - -def _parse_yang_file(path: Path) -> Tuple[Optional[str], Set[str]]: - path_stem = path.stem # file name without extension - expected_module_name = path_stem.split('@', 1)[0] - - try: - data = path.read_text(encoding='utf-8', errors='ignore') - except Exception: - data = path.read_bytes().decode('utf-8', errors='ignore') - - data = COMMENT_MULTI_RE.sub('', data) - data = COMMENT_SINGLE_RE.sub('', data) - - match = MODNAME_RE.search(data) - if match is None: - return None, set() - module_name = match.group(2) - if module_name != expected_module_name: - MSG = 'Module({:s}) mismatches its FileName({:s})' - raise Exception(MSG.format(str(module_name), str(expected_module_name))) - - module_imports = set() - if module_name is not None: - module_imports.update(IMPORT_BLOCK_RE.findall(data)) - module_imports.update(IMPORT_SEMI_RE.findall(data)) - - # ignore modules importing themselves, just in case - module_imports.discard(module_name) - - return module_name, module_imports - - -class YangModuleDiscoverer: - def __init__(self, yang_search_path : str) -> None: - self._yang_search_path = yang_search_path - - self._module_to_paths : Dict[str, List[Path]] = defaultdict(list) - self._module_to_imports : Dict[str, Set[str]] = defaultdict(set) - self._ordered_module_names : Optional[List[str]] = None - - - def run( - self, do_print_order : bool = False, do_log_order : bool = False, - logger : Optional[logging.Logger] = None, level : int = logging.INFO - ) -> List[str]: - if self._ordered_module_names is None: - self._scan_modules() - self._sort_modules() - - if do_print_order: - self.print_order() - - if do_log_order: - if logger is None: logger = logging.getLogger(__name__) - self.log_order(logger, level=level) - - return self._ordered_module_names - - def _scan_modules(self) -> None: - yang_root = 
Path(self._yang_search_path).resolve() - if not yang_root.exists(): - MSG = 'Path({:s}) not found' - raise Exception(MSG.format(str(self._yang_search_path))) - - for yang_path in yang_root.rglob('*.yang'): - module_name, module_imports = _parse_yang_file(yang_path) - if module_name is None: continue - self._module_to_paths[module_name].append(yang_path) - self._module_to_imports[module_name] = module_imports - - if len(self._module_to_paths) == 0: - MSG = 'No modules found in Path({:s})' - raise Exception(MSG.format(str(self._yang_search_path))) - - self._check_duplicated_module_declaration() - self._check_missing_modules() - - - def _check_duplicated_module_declaration(self) -> None: - duplicate_module_declarations : List[str] = list() - for module_name, paths in self._module_to_paths.items(): - if len(paths) == 1: continue - str_paths = [str(p) for p in paths] - duplicate_module_declarations.append( - ' {:s} => {:s}'.format(module_name, str_paths) - ) - - if len(duplicate_module_declarations) > 0: - MSG = 'Duplicate module declarations:\n{:s}' - str_dup_mods = '\n'.join(duplicate_module_declarations) - raise Exception(MSG.format(str_dup_mods)) - - - def _check_missing_modules(self) -> None: - local_module_names = set(self._module_to_imports.keys()) - missing_modules : List[str] = list() - for module_name, imported_modules in self._module_to_imports.items(): - missing = imported_modules.difference(local_module_names) - if len(missing) == 0: continue - missing_modules.append( - ' {:s} => {:s}'.format(module_name, str(missing)) - ) - - if len(missing_modules) > 0: - MSG = 'Missing modules:\n{:s}' - str_mis_mods = '\n'.join(missing_modules) - raise Exception(MSG.format(str_mis_mods)) - - - def _sort_modules(self) -> None: - ts = TopologicalSorter() - for module_name, imported_modules in self._module_to_imports.items(): - ts.add(module_name, *imported_modules) - - try: - self._ordered_module_names = list(ts.static_order()) # raises CycleError on cycles - except CycleError as e: - cycle = list(dict.fromkeys(e.args[1])) # de-dup while preserving order - MSG = 'Circular dependencies between modules: {:s}' - raise Exception(MSG.format(str(cycle))) # pylint: disable=raise-missing-from - - - def dump_order(self) -> List[Tuple[int, str, List[str]]]: - if self._ordered_module_names is None: - raise Exception('First process the YANG Modules running method .run()') - - module_order : List[Tuple[int, str, List[str]]] = list() - for i, module_name in enumerate(self._ordered_module_names, 1): - module_imports = sorted(self._module_to_imports[module_name]) - module_order.append((i, module_name, module_imports)) - - return module_order - - - def print_order(self) -> None: - print('Ordered Modules:') - for i, module_name, module_imports in self.dump_order(): - MSG = '{:2d} : {:s} => {:s}' - print(MSG.format(i, module_name, str(module_imports))) - - - def log_order(self, logger : logging.Logger, level : int = logging.INFO) -> None: - logger.log(level, 'Ordered Modules:') - for i, module_name, module_imports in self.dump_order(): - MSG = '{:2d} : {:s} => {:s}' - logger.log(level, MSG.format(i, module_name, str(module_imports))) - - -def main() -> None: - logging.basicConfig(level=logging.INFO) - - ymd = YangModuleDiscoverer('./yang') - ordered_module_names = ymd.run( - do_print_order=True, - do_log_order=True - ) - print('ordered_module_names', ordered_module_names) - - -if __name__ == '__main__': - main() diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py 
b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py index 654971fd2..2bd587ce8 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py @@ -13,15 +13,12 @@ # limitations under the License. -import json, logging, os, secrets -from flask import Flask -from flask_restful import Api -from .Dispatch import RestConfDispatch -from .HostMeta import HostMeta -from .YangHandler import YangHandler -from .YangModelDiscoverer import YangModuleDiscoverer -from .simap_client.RestConfClient import RestConfClient -from .simap_client.SimapClient import SimapClient +# This file overwrites default RestConf Server `app.py` file. + + +import logging +from common.tools.rest_conf.server.restconf_server.RestConfServerApplication import RestConfServerApplication +from .SimapUpdater import SimapUpdater logging.basicConfig( @@ -32,85 +29,19 @@ LOGGER = logging.getLogger(__name__) logging.getLogger('RestConfClient').setLevel(logging.WARN) -RESTCONF_PREFIX = '/restconf' -SECRET_KEY = secrets.token_hex(64) - - -YANG_SEARCH_PATH = './yang' - -ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) -YANG_MODULE_NAMES = ymd.run(do_log_order=True) - -STARTUP_FILE = './startup.json' -with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: - YANG_STARTUP_DATA = json.loads(fp.read()) - - -SIMAP_ADDRESS = os.environ.get('SIMAP_ADDRESS') -SIMAP_PORT = os.environ.get('SIMAP_PORT' ) - -if SIMAP_ADDRESS is not None and SIMAP_PORT is not None: - restconf_client = RestConfClient( - SIMAP_ADDRESS, port=SIMAP_PORT, - logger=logging.getLogger('RestConfClient') - ) - simap_client = SimapClient(restconf_client) +LOGGER.info('Starting...') +rcs_app = RestConfServerApplication() +LOGGER.info('All connectors registered') - te_topo = simap_client.network('admin') - te_topo.update() +startup_data = rcs_app.get_startup_data() - networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) - networks = networks.get('network', list()) - assert len(networks) == 1 - network = networks[0] - assert network['network-id'] == 'admin' +networks = startup_data.get('ietf-network:networks', dict()) +networks = networks.get('network', list()) +if len(networks) == 1 and networks[0]['network-id'] == 'admin': + simap_updater = SimapUpdater() + simap_updater.upload_topology(networks[0]) - nodes = network.get('node', list()) - for node in nodes: - node_id = node['node-id'] - tp_ids = [ - tp['tp-id'] - for tp in node['ietf-network-topology:termination-point'] - ] - te_topo.node(node_id).create(termination_point_ids=tp_ids) - - links = network.get('ietf-network-topology:link', list()) - for link in links: - link_id = link['link-id'] - link_src = link['source'] - link_dst = link['destination'] - link_src_node_id = link_src['source-node'] - link_src_tp_id = link_src['source-tp'] - link_dst_node_id = link_dst['dest-node'] - link_dst_tp_id = link_dst['dest-tp'] - - te_topo.link(link_id).create( - link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id - ) - - -yang_handler = YangHandler( - YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA -) -restconf_paths = yang_handler.get_schema_paths() - -app = Flask(__name__) -app.config['SECRET_KEY'] = SECRET_KEY - -api = Api(app) -api.add_resource( - HostMeta, - '/.well-known/host-meta', - resource_class_args=(RESTCONF_PREFIX,) -) -api.add_resource( - RestConfDispatch, - RESTCONF_PREFIX + '/data', - RESTCONF_PREFIX + '/data/', - RESTCONF_PREFIX + '/data/', - resource_class_args=(yang_handler,) -) +rcs_app.dump_configuration() +app = 
rcs_app.get_flask_app() -LOGGER.info('Available RESTCONF paths:') -for restconf_path in sorted(restconf_paths): - LOGGER.info('- {:s}'.format(str(restconf_path))) +LOGGER.info('Initialization completed!') diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py deleted file mode 100644 index b7c057a70..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/RestConfClient.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum, logging, requests -from requests.auth import HTTPBasicAuth -from typing import Any, Dict, Optional, Set - -class RestRequestMethod(enum.Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - PATCH = 'patch' - DELETE = 'delete' - -EXPECTED_STATUS_CODES : Set[int] = { - requests.codes['OK' ], # 200 - OK - requests.codes['CREATED' ], # 201 - Created - requests.codes['ACCEPTED' ], # 202 - Accepted - requests.codes['NO_CONTENT'], # 204 - No Content -} - -def compose_basic_auth( - username : Optional[str] = None, password : Optional[str] = None -) -> Optional[HTTPBasicAuth]: - if username is None or password is None: return None - return HTTPBasicAuth(username, password) - -class SchemeEnum(enum.Enum): - HTTP = 'http' - HTTPS = 'https' - -def check_scheme(scheme : str) -> str: - str_scheme = str(scheme).lower() - enm_scheme = SchemeEnum._value2member_map_[str_scheme] - return enm_scheme.value - -HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' -RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' - -class RestConfClient: - def __init__( - self, address : str, port : int = 8080, scheme : str = 'http', - username : Optional[str] = None, password : Optional[str] = None, - timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, - logger : Optional[logging.Logger] = None - ) -> None: - self._address = address - self._port = int(port) - self._scheme = check_scheme(scheme) - self._auth = compose_basic_auth(username=username, password=password) - self._base_url = '' - self._timeout = int(timeout) - self._verify_certs = verify_certs - self._allow_redirects = allow_redirects - self._logger = logger - - self._discover_base_url() - - def _discover_base_url(self) -> None: - host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) - host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) - - links = host_meta.get('links') - if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') - if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') - if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') - - link = links[0] - if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') - - rel = link.get('rel') - if rel is None: raise 
AttributeError('Missing attribute "links[0].rel" in host-meta reply') - if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') - if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') - - href = link.get('href') - if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') - if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') - - self._base_url = str(href + '/data').replace('//', '/') - - def _log_msg_request( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - log_level : int = logging.INFO - ) -> str: - msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) - if body is not None: msg += ' body={:s}'.format(str(body)) - if self._logger is not None: self._logger.log(log_level, msg) - return msg - - def _log_msg_check_reply( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - reply : requests.Response, expected_status_codes : Set[int], - log_level : int = logging.INFO - ) -> str: - msg = 'Reply: {:s}'.format(str(reply.text)) - if self._logger is not None: self._logger.log(log_level, msg) - http_status_code = reply.status_code - if http_status_code in expected_status_codes: return msg - MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' - msg = MSG.format( - str(method.value).upper(), str(request_url), str(body), - str(http_status_code), str(reply.text) - ) - self._logger.error(msg) - raise Exception(msg) - - def _do_rest_request( - self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = EXPECTED_STATUS_CODES - ) -> Optional[Any]: - candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) - if endpoint.lower().startswith(candidate_schemes): - request_url = endpoint.lstrip('/') - else: - endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') - request_url = '{:s}://{:s}:{:d}/{:s}'.format( - self._scheme, self._address, self._port, endpoint.lstrip('/') - ) - self._log_msg_request(method, request_url, body) - try: - headers = {'accept': 'application/json'} - reply = requests.request( - method.value, request_url, headers=headers, json=body, - auth=self._auth, verify=self._verify_certs, timeout=self._timeout, - allow_redirects=self._allow_redirects - ) - except Exception as e: - MSG = 'Request failed. 
method={:s} url={:s} body={:s}' - msg = MSG.format(str(method.value).upper(), request_url, str(body)) - self._logger.exception(msg) - raise Exception(msg) from e - self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) - if reply.content and len(reply.content) > 0: return reply.json() - return None - - def get( - self, endpoint : str, - expected_status_codes : Set[int] = {requests.codes['OK']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.GET, endpoint, - expected_status_codes=expected_status_codes - ) - - def post( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.POST, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def put( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PUT, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def patch( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PATCH, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def delete( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.DELETE, endpoint, body=body, - expected_status_codes=expected_status_codes - ) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py deleted file mode 100644 index 3ccc21c7d..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/tests/tools/mock_nce_fan_ctrl/requirements.in b/src/tests/tools/mock_nce_fan_ctrl/requirements.in deleted file mode 100644 index 17155ed58..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/requirements.in +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -cryptography==39.0.1 -eventlet==0.39.0 -Flask-HTTPAuth==4.5.0 -Flask-RESTful==0.3.9 -Flask==2.1.3 -gunicorn==23.0.0 -jsonschema==4.4.0 -libyang==2.8.4 -pyopenssl==23.0.0 -requests==2.27.1 -werkzeug==2.3.7 diff --git a/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh b/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh deleted file mode 100755 index 78fe25b9e..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_gunicorn.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - -export FLASK_ENV=development -gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG nce_fan_ctrl.app:app diff --git a/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh b/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh deleted file mode 100755 index 06432851f..000000000 --- a/src/tests/tools/mock_nce_fan_ctrl/run_ctrl_standalone.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - -python -m nce_fan_ctrl diff --git a/src/tests/tools/mock_nce_t_ctrl/Dockerfile b/src/tests/tools/mock_nce_t_ctrl/Dockerfile index 3c26ed5af..a33510928 100644 --- a/src/tests/tools/mock_nce_t_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_t_ctrl/Dockerfile @@ -45,18 +45,24 @@ RUN python3 -m pip install --upgrade pip-tools # Create component sub-folders, get specific Python packages RUN mkdir -p /var/teraflow/nce_t_ctrl/ WORKDIR /var/teraflow/nce_t_ctrl/ -COPY ./requirements.in ./requirements.in +COPY src/common/tools/rest_conf/server/requirements.in ./requirements.in RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt # Add component files into working directory -COPY ./yang/. 
./yang/ -COPY ./nce_t_ctrl/*.py ./nce_t_ctrl/ -COPY ./nce_t_ctrl/simap_client/*.py ./nce_t_ctrl/simap_client/ -COPY ./startup.json ./startup.json +COPY src/common/tools/rest_conf/server/restconf_server/ ./nce_t_ctrl/ +COPY src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/*.py ./nce_t_ctrl/ +COPY src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/*.py ./nce_t_ctrl/simap_client/ +COPY src/tests/tools/mock_nce_t_ctrl/yang/. ./yang/ +COPY src/tests/tools/mock_nce_t_ctrl/startup.json ./startup.json + +# Configure RESTCONF Server +ENV RESTCONF_PREFIX="/restconf" +ENV YANG_SEARCH_PATH="./yang" +ENV STARTUP_FILE="./startup.json" # Configure Flask for production -ENV FLASK_ENV=production +ENV FLASK_ENV="production" # Start the service ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nce_t_ctrl.app:app"] diff --git a/src/tests/tools/mock_nce_t_ctrl/README.md b/src/tests/tools/mock_nce_t_ctrl/README.md index 8d5a4dfbc..e6877a94b 100644 --- a/src/tests/tools/mock_nce_t_ctrl/README.md +++ b/src/tests/tools/mock_nce_t_ctrl/README.md @@ -7,17 +7,17 @@ In this case, it is prepared to load a NCE-T Controller based on: - IETF YANG Data Model for Traffic Engineering Tunnels, Label Switched Paths and Interfaces -## Build the Docker image +## Build the RESTCONF-based NCE-T Controller Docker image ```bash ./build.sh ``` -## Deploy the Controller +## Deploy the RESTCONF-based NCE-T Controller ```bash ./deploy.sh ``` -## Destroy the Controller +## Destroy the RESTCONF-based NCE-T Controller ```bash ./destroy.sh ``` diff --git a/src/tests/tools/mock_nce_t_ctrl/build.sh b/src/tests/tools/mock_nce_t_ctrl/build.sh index 16b8903bb..d02c0fc43 100755 --- a/src/tests/tools/mock_nce_t_ctrl/build.sh +++ b/src/tests/tools/mock_nce_t_ctrl/build.sh @@ -14,8 +14,9 @@ # limitations under the License. # Make folder containing the script the root folder for its execution -cd $(dirname $0) +cd $(dirname $0)/../../../../ -docker buildx build -t nce-t-ctrl:test -f Dockerfile . +# Build image for NCE-T Controller +docker buildx build -t nce-t-ctrl:test -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . #docker tag nce-t-ctrl:test localhost:32000/tfs/nce-t-ctrl:test #docker push localhost:32000/tfs/nce-t-ctrl:test diff --git a/src/tests/tools/mock_nce_t_ctrl/deploy.sh b/src/tests/tools/mock_nce_t_ctrl/deploy.sh index b4dbfc7a6..e1d36506e 100755 --- a/src/tests/tools/mock_nce_t_ctrl/deploy.sh +++ b/src/tests/tools/mock_nce_t_ctrl/deploy.sh @@ -13,15 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. + # Cleanup docker rm --force nce-t-ctrl + # Create NCE-T Controller docker run --detach --name nce-t-ctrl --publish 8080:8080 nce-t-ctrl:test + sleep 2 -# Dump Docker containers + +# Dump NCE-T Controller container docker ps -a + echo "Bye!" diff --git a/src/tests/tools/mock_nce_t_ctrl/destroy.sh b/src/tests/tools/mock_nce_t_ctrl/destroy.sh index 726535128..44ee8703c 100755 --- a/src/tests/tools/mock_nce_t_ctrl/destroy.sh +++ b/src/tests/tools/mock_nce_t_ctrl/destroy.sh @@ -13,10 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. + # Cleanup docker rm --force nce-t-ctrl + # Dump Docker containers docker ps -a + echo "Bye!" 
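The relocated `SimapClient.py` (shared by both mock controllers, see its diff earlier in this patch) now composes RESTCONF-style paths with `list=key` segments instead of the previous XPath bracket predicates; turning those segments back into libyang predicates is presumably handled by the shared `restconf_server` path handling. The snippet below is purely illustrative: the templates mirror the class attributes shown in that diff, and the key values (`admin`, `node-1`, `tp-1`) are made-up examples.

```python
# Illustrative only: expansion of the reworked SimapClient endpoint templates.
# Plain string formatting, runnable as-is; identifiers are example values.
NETWORK = '/ietf-network:networks/network={:s}'
NODE    = NETWORK + '/node={:s}'
TP      = NODE + '/ietf-network-topology:termination-point={:s}'
LINK    = NETWORK + '/ietf-network-topology:link={:s}'

print(NODE.format('admin', 'node-1'))
# /ietf-network:networks/network=admin/node=node-1
print(TP.format('admin', 'node-1', 'tp-1'))
# /ietf-network:networks/network=admin/node=node-1/ietf-network-topology:termination-point=tp-1
print(LINK.format('admin', 'link-1'))
# /ietf-network:networks/network=admin/ietf-network-topology:link=link-1
```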
diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py deleted file mode 100644 index 319aa9f7b..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Dispatch.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json, logging -from flask import Response, abort, jsonify, request -from flask_restful import Resource -from .HttpStatusCodesEnum import HttpStatusCodesEnum -from .YangHandler import YangHandler - -LOGGER = logging.getLogger(__name__) - -class RestConfDispatch(Resource): - def __init__(self, yang_handler : YangHandler) -> None: - super().__init__() - self._yang_handler = yang_handler - - def get(self, subpath : str = '/') -> Response: - data = self._yang_handler.get(subpath) - if data is None: - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, - description='Path({:s}) not found'.format(str(subpath)) - ) - - LOGGER.info('[GET] {:s} => {:s}'.format(subpath, str(data))) - - response = jsonify(json.loads(data)) - response.status_code = HttpStatusCodesEnum.SUCCESS_OK.value - return response - - def post(self, subpath : str) -> Response: - # TODO: client should not provide identifier of element to be created, add it to subpath - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') - - data = self._yang_handler.get(subpath) - if data is not None: - abort( - HttpStatusCodesEnum.CLI_ERR_CONFLICT.value, - description='Path({:s}) already exists'.format(str(subpath)) - ) - - try: - json_data = self._yang_handler.create(subpath, payload) - except Exception as e: - LOGGER.exception('Create failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - - response = jsonify({'status': 'created'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED.value - return response - - def put(self, subpath : str) -> Response: - # NOTE: client should provide identifier of element to be created/replaced - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') - - try: - json_data = self._yang_handler.update(subpath, payload) - except Exception as e: - LOGGER.exception('Update failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - updated = False # TODO: compute if create or update - - response = jsonify({'status': ( - 'updated' if updated else 'created' - )}) - response.status_code = ( - HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value - if updated else - HttpStatusCodesEnum.SUCCESS_CREATED.value - ) - return 
response - - def patch(self, subpath : str) -> Response: - # NOTE: client should provide identifier of element to be patched - try: - payload = request.get_json(force=True) - except Exception: - LOGGER.exception('Invalid JSON') - abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') - - try: - json_data = self._yang_handler.update(subpath, payload) - except Exception as e: - LOGGER.exception('Update failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - - response = jsonify({'status': 'patched'}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value - return response - - def delete(self, subpath : str) -> Response: - # NOTE: client should provide identifier of element to be patched - - try: - deleted_node = self._yang_handler.delete(subpath) - except Exception as e: - LOGGER.exception('Delete failed') - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE.value, - description=str(e) - ) - - LOGGER.info('[DELETE] {:s} => {:s}'.format(subpath, str(deleted_node))) - - if deleted_node is None: - abort( - HttpStatusCodesEnum.CLI_ERR_NOT_FOUND.value, - description='Path({:s}) not found'.format(str(subpath)) - ) - - response = jsonify({}) - response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value - return response diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py deleted file mode 100644 index 95ef34b19..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HostMeta.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import xml.etree.ElementTree as ET -from flask import abort, jsonify, make_response, request -from flask_restful import Resource -from .HttpStatusCodesEnum import HttpStatusCodesEnum - -XRD_NS = 'http://docs.oasis-open.org/ns/xri/xrd-1.0' -ET.register_namespace('', XRD_NS) - -class HostMeta(Resource): - def __init__(self, restconf_prefix : str) -> None: - super().__init__() - self._restconf_prefix = restconf_prefix - - def get(self): - best = request.accept_mimetypes.best_match([ - 'application/xrd+xml', 'application/json' - ], default='application/xrd+xml') - - if best == 'application/xrd+xml': - xrd = ET.Element('{{{:s}}}XRD'.format(str(XRD_NS))) - ET.SubElement(xrd, '{{{:s}}}Link'.format(str(XRD_NS)), attrib={ - 'rel': 'restconf', 'href': self._restconf_prefix - }) - xml_string = ET.tostring(xrd, encoding='utf-8', xml_declaration=True).decode() - response = make_response(str(xml_string)) - response.status_code = 200 - response.content_type = best - return response - elif best == 'application/json': - response = jsonify({'links': [{'rel': 'restconf', 'href': self._restconf_prefix}]}) - response.status_code = 200 - response.content_type = best - return response - else: - abort(HttpStatusCodesEnum.CLI_ERR_NOT_ACCEPTABLE) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py deleted file mode 100644 index c44d135c0..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/HttpStatusCodesEnum.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import enum - -class HttpStatusCodesEnum(enum.IntEnum): - SUCCESS_OK = 200 - SUCCESS_CREATED = 201 - SUCCESS_ACCEPTED = 202 - SUCCESS_NO_CONTENT = 204 - CLI_ERR_BAD_REQUEST = 400 - CLI_ERR_NOT_FOUND = 404 - CLI_ERR_NOT_ACCEPTABLE = 406 - CLI_ERR_CONFLICT = 409 - SVR_ERR_NOT_IMPLEMENTED = 501 diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py similarity index 95% rename from src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py rename to src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py index b4c27d43a..8f457d452 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/SimapClient.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py @@ -14,12 +14,12 @@ from typing import Dict, List, Tuple -from .RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient class TerminationPoint: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}/node={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): self._restconf_client = restconf_client @@ -63,8 +63,8 @@ class TerminationPoint: self._restconf_client.delete(endpoint) class Node: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -120,8 +120,8 @@ class Node: self._restconf_client.delete(endpoint) class Link: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client @@ -172,7 +172,7 @@ class Link: class Network: ENDPOINT_NO_ID = '/ietf-network:networks' - ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str): self._restconf_client = restconf_client diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py new file mode 100644 index 000000000..4d8cff662 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py @@ -0,0 +1,64 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, os +from typing import Dict +from common.tools.rest_conf.client.RestConfClient import RestConfClient +from .SimapClient import SimapClient + + +SIMAP_ADDRESS = os.environ.get('SIMAP_ADDRESS') +SIMAP_PORT = os.environ.get('SIMAP_PORT' ) + + +class SimapUpdater: + def __init__(self): + if SIMAP_ADDRESS is None: return + if SIMAP_PORT is None: return + + self._restconf_client = RestConfClient( + SIMAP_ADDRESS, port=SIMAP_PORT, + logger=logging.getLogger('RestConfClient') + ) + self._simap_client = SimapClient(self._restconf_client) + + + def upload_topology(self, network_data : Dict) -> None: + network_id = network_data['network-id'] + te_topo = self._simap_client.network(network_id) + te_topo.update() + + nodes = network_data.get('node', list()) + for node in nodes: + node_id = node['node-id'] + tp_ids = [ + tp['tp-id'] + for tp in node['ietf-network-topology:termination-point'] + ] + te_topo.node(node_id).create(termination_point_ids=tp_ids) + + links = network_data.get('ietf-network-topology:link', list()) + for link in links: + link_id = link['link-id'] + link_src = link['source'] + link_dst = link['destination'] + link_src_node_id = link_src['source-node'] + link_src_tp_id = link_src['source-tp'] + link_dst_node_id = link_dst['dest-node'] + link_dst_tp_id = link_dst['dest-tp'] + + te_topo.link(link_id).create( + link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id + ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py deleted file mode 100644 index 9df57528f..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangHandler.py +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import json, libyang, logging -import urllib.parse -from typing import Dict, List, Optional, Set - - -LOGGER = logging.getLogger(__name__) - - -def walk_schema(node : libyang.SNode, path : str = '') -> Set[str]: - current_path = f'{path}/{node.name()}' - schema_paths : Set[str] = {current_path} - for child in node.children(): - if isinstance(child, (libyang.SLeaf, libyang.SLeafList)): continue - schema_paths.update(walk_schema(child, current_path)) - return schema_paths - -def extract_schema_paths(yang_module : libyang.Module) -> Set[str]: - schema_paths : Set[str] = set() - for node in yang_module.children(): - schema_paths.update(walk_schema(node)) - return schema_paths - -class YangHandler: - def __init__( - self, yang_search_path : str, yang_module_names : List[str], - yang_startup_data : Dict - ) -> None: - self._yang_context = libyang.Context(yang_search_path) - self._loaded_modules : Set[str] = set() - self._schema_paths : Set[str] = set() - for yang_module_name in yang_module_names: - LOGGER.info('Loading module: {:s}'.format(str(yang_module_name))) - yang_module = self._yang_context.load_module(yang_module_name) - yang_module.feature_enable_all() - self._loaded_modules.add(yang_module_name) - self._schema_paths.update(extract_schema_paths(yang_module)) - - self._datastore = self._yang_context.parse_data_mem( - json.dumps(yang_startup_data), fmt='json' - ) - - def destroy(self) -> None: - self._yang_context.destroy() - - def get_schema_paths(self) -> Set[str]: - return self._schema_paths - - def get(self, path : str) -> Optional[str]: - path = self._normalize_path(path) - data = self._datastore.find_path(path) - if data is None: return None - json_data = data.print_mem( - fmt='json', with_siblings=False, pretty=True, - keep_empty_containers=False, include_implicit_defaults=True - ) - return json_data - - def get_xpath(self, xpath : str) -> List[str]: - if not xpath.startswith('/'): xpath = '/' + xpath - items = self._datastore.find_all(xpath) - result = list() - for item in items: - result.append(item.print_mem( - fmt='json', with_siblings=False, pretty=True, - keep_empty_containers=False, include_implicit_defaults=True - )) - return result - - def create(self, path : str, payload : Dict) -> str: - path = self._normalize_path(path) - # TODO: client should not provide identifier of element to be created, add it to subpath - dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( - json.dumps(payload), 'json', strict=True, parse_only=False, - validate_present=True, validate_multi_error=True - ) - if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - - dnode : Optional[libyang.DNode] = self._yang_context.create_data_path( - path, parent=self._datastore, value=dnode_parsed, update=False - ) - self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) - - json_data = dnode.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def update(self, path : str, payload : Dict) -> str: - path = self._normalize_path(path) - # NOTE: client should provide identifier of element to be updated - dnode_parsed : Optional[libyang.DNode] = self._yang_context.parse_data_mem( - json.dumps(payload), 'json', strict=True, parse_only=False, - validate_present=True, validate_multi_error=True - ) - if dnode_parsed is None: raise Exception('Unable to parse Data({:s})'.format(str(payload))) - - dnode = self._yang_context.create_data_path( - path, 
parent=self._datastore, value=dnode_parsed, update=True - ) - self._datastore.merge(dnode_parsed, with_siblings=True, defaults=True) - - json_data = dnode.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - ) - return json_data - - def delete(self, path : str) -> Optional[str]: - path = self._normalize_path(path) - - # NOTE: client should provide identifier of element to be deleted - - node : libyang.DNode = self._datastore.find_path(path) - if node is None: return None - - LOGGER.info('node = {:s}'.format(str(node))) - json_data = str(node.print_mem( - fmt='json', with_siblings=True, pretty=True, - keep_empty_containers=True, include_implicit_defaults=True - )) - LOGGER.info('json_data = {:s}'.format(json_data)) - - node.unlink() - node.free() - - return json_data - - def _normalize_path(self, path : str) -> str: - """ - Normalize RESTCONF path segments using the standard `list=` - syntax into the libyang bracketed predicate form expected by - the datastore (e.g. `network="admin"` -> `network[network-id="admin"]`). - - This implementation looks up the schema node for the list and - uses its key leaf names to build the proper predicates. If the - schema information is unavailable, it falls back to using the - list name as the key name. - """ - - # URL-decode each path segment so escaped characters like `%22` - # (double quotes) are properly handled when parsing list keys. - parts = [urllib.parse.unquote(p) for p in path.strip('/').split('/') if p != ''] - schema_path = '' - out_parts: List[str] = [] - - for part in parts: - if '=' in part: - # split into name and value (value may contain commas/quotes) - name, val = part.split('=', 1) - # keep original name (may include prefix) for output, but - # use local name (without module prefix) to lookup schema - local_name = name.split(':', 1)[1] if ':' in name else name - schema_path = schema_path + '/' + local_name if schema_path else '/' + local_name - schema_nodes = list(self._yang_context.find_path(schema_path)) - if len(schema_nodes) != 1: - MSG = 'No/Multiple SchemaNodes({:s}) for SchemaPath({:s})' - raise Exception(MSG.format( - str([repr(sn) for sn in schema_nodes]), schema_path - )) - schema_node = schema_nodes[0] - - # parse values splitting on commas outside quotes - values = [] - cur = '' - in_quotes = False - for ch in val: - if ch == '"': - in_quotes = not in_quotes - cur += ch - elif ch == ',' and not in_quotes: - values.append(cur) - cur = '' - else: - cur += ch - if cur != '': - values.append(cur) - - # determine key names from schema_node if possible - key_names = None - if isinstance(schema_node, libyang.SList): - key_names = [k.name() for k in schema_node.keys()] - #if isinstance(keys, (list, tuple)): - # key_names = keys - #elif isinstance(keys, str): - # key_names = [kn for kn in k.split() if kn] - #else: - # MSG = 'Unsupported keys format: {:s} / {:s}' - # raise Exception(MSG.format(str(type(keys)), str(keys))) - #elif hasattr(schema_node, 'key'): - # k = schema_node.key() - # if isinstance(k, str): - # key_names = [kn for kn in k.split() if kn] - - if not key_names: - # fallback: use the local list name as the single key - key_names = [local_name] - - # build predicate(s) - preds = [] - for idx, kn in enumerate(key_names): - kv = values[idx] if idx < len(values) else values[0] - preds.append(f'[{kn}="{kv}"]') - - out_parts.append(name + ''.join(preds)) - else: - local_part = part.split(':', 1)[1] if ':' in part else part - schema_path = schema_path + '/' 
+ local_part if schema_path else '/' + local_part - out_parts.append(part) - - return '/' + '/'.join(out_parts) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py deleted file mode 100644 index f31305280..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/YangModelDiscoverer.py +++ /dev/null @@ -1,195 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging, re -from collections import defaultdict -from graphlib import TopologicalSorter, CycleError -from pathlib import Path -from typing import Dict, List, Optional, Set, Tuple - - -COMMENT_SINGLE_RE = re.compile(r"//.*?$", re.MULTILINE) -COMMENT_MULTI_RE = re.compile(r"/\*.*?\*/", re.DOTALL) - -# module / submodule name -MODNAME_RE = re.compile(r"\b(module|submodule)\s+([A-Za-z0-9_.-]+)\s*\{") - -# import foo { ... } (most common form) -IMPORT_BLOCK_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*\{", re.IGNORECASE) - -# import foo; (very rare, but we’ll support it) -IMPORT_SEMI_RE = re.compile(r"\bimport\s+([A-Za-z0-9_.-]+)\s*;", re.IGNORECASE) - - -def _parse_yang_file(path: Path) -> Tuple[Optional[str], Set[str]]: - path_stem = path.stem # file name without extension - expected_module_name = path_stem.split('@', 1)[0] - - try: - data = path.read_text(encoding='utf-8', errors='ignore') - except Exception: - data = path.read_bytes().decode('utf-8', errors='ignore') - - data = COMMENT_MULTI_RE.sub('', data) - data = COMMENT_SINGLE_RE.sub('', data) - - match = MODNAME_RE.search(data) - if match is None: - return None, set() - module_name = match.group(2) - if module_name != expected_module_name: - MSG = 'Module({:s}) mismatches its FileName({:s})' - raise Exception(MSG.format(str(module_name), str(expected_module_name))) - - module_imports = set() - if module_name is not None: - module_imports.update(IMPORT_BLOCK_RE.findall(data)) - module_imports.update(IMPORT_SEMI_RE.findall(data)) - - # ignore modules importing themselves, just in case - module_imports.discard(module_name) - - return module_name, module_imports - - -class YangModuleDiscoverer: - def __init__(self, yang_search_path : str) -> None: - self._yang_search_path = yang_search_path - - self._module_to_paths : Dict[str, List[Path]] = defaultdict(list) - self._module_to_imports : Dict[str, Set[str]] = defaultdict(set) - self._ordered_module_names : Optional[List[str]] = None - - - def run( - self, do_print_order : bool = False, do_log_order : bool = False, - logger : Optional[logging.Logger] = None, level : int = logging.INFO - ) -> List[str]: - if self._ordered_module_names is None: - self._scan_modules() - self._sort_modules() - - if do_print_order: - self.print_order() - - if do_log_order: - if logger is None: logger = logging.getLogger(__name__) - self.log_order(logger, level=level) - - return self._ordered_module_names - - def _scan_modules(self) -> None: - yang_root = 
Path(self._yang_search_path).resolve() - if not yang_root.exists(): - MSG = 'Path({:s}) not found' - raise Exception(MSG.format(str(self._yang_search_path))) - - for yang_path in yang_root.rglob('*.yang'): - module_name, module_imports = _parse_yang_file(yang_path) - if module_name is None: continue - self._module_to_paths[module_name].append(yang_path) - self._module_to_imports[module_name] = module_imports - - if len(self._module_to_paths) == 0: - MSG = 'No modules found in Path({:s})' - raise Exception(MSG.format(str(self._yang_search_path))) - - self._check_duplicated_module_declaration() - self._check_missing_modules() - - - def _check_duplicated_module_declaration(self) -> None: - duplicate_module_declarations : List[str] = list() - for module_name, paths in self._module_to_paths.items(): - if len(paths) == 1: continue - str_paths = [str(p) for p in paths] - duplicate_module_declarations.append( - ' {:s} => {:s}'.format(module_name, str_paths) - ) - - if len(duplicate_module_declarations) > 0: - MSG = 'Duplicate module declarations:\n{:s}' - str_dup_mods = '\n'.join(duplicate_module_declarations) - raise Exception(MSG.format(str_dup_mods)) - - - def _check_missing_modules(self) -> None: - local_module_names = set(self._module_to_imports.keys()) - missing_modules : List[str] = list() - for module_name, imported_modules in self._module_to_imports.items(): - missing = imported_modules.difference(local_module_names) - if len(missing) == 0: continue - missing_modules.append( - ' {:s} => {:s}'.format(module_name, str(missing)) - ) - - if len(missing_modules) > 0: - MSG = 'Missing modules:\n{:s}' - str_mis_mods = '\n'.join(missing_modules) - raise Exception(MSG.format(str_mis_mods)) - - - def _sort_modules(self) -> None: - ts = TopologicalSorter() - for module_name, imported_modules in self._module_to_imports.items(): - ts.add(module_name, *imported_modules) - - try: - self._ordered_module_names = list(ts.static_order()) # raises CycleError on cycles - except CycleError as e: - cycle = list(dict.fromkeys(e.args[1])) # de-dup while preserving order - MSG = 'Circular dependencies between modules: {:s}' - raise Exception(MSG.format(str(cycle))) # pylint: disable=raise-missing-from - - - def dump_order(self) -> List[Tuple[int, str, List[str]]]: - if self._ordered_module_names is None: - raise Exception('First process the YANG Modules running method .run()') - - module_order : List[Tuple[int, str, List[str]]] = list() - for i, module_name in enumerate(self._ordered_module_names, 1): - module_imports = sorted(self._module_to_imports[module_name]) - module_order.append((i, module_name, module_imports)) - - return module_order - - - def print_order(self) -> None: - print('Ordered Modules:') - for i, module_name, module_imports in self.dump_order(): - MSG = '{:2d} : {:s} => {:s}' - print(MSG.format(i, module_name, str(module_imports))) - - - def log_order(self, logger : logging.Logger, level : int = logging.INFO) -> None: - logger.log(level, 'Ordered Modules:') - for i, module_name, module_imports in self.dump_order(): - MSG = '{:2d} : {:s} => {:s}' - logger.log(level, MSG.format(i, module_name, str(module_imports))) - - -def main() -> None: - logging.basicConfig(level=logging.INFO) - - ymd = YangModuleDiscoverer('./yang') - ordered_module_names = ymd.run( - do_print_order=True, - do_log_order=True - ) - print('ordered_module_names', ordered_module_names) - - -if __name__ == '__main__': - main() diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py 
b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 654971fd2..2bd587ce8 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -13,15 +13,12 @@ # limitations under the License. -import json, logging, os, secrets -from flask import Flask -from flask_restful import Api -from .Dispatch import RestConfDispatch -from .HostMeta import HostMeta -from .YangHandler import YangHandler -from .YangModelDiscoverer import YangModuleDiscoverer -from .simap_client.RestConfClient import RestConfClient -from .simap_client.SimapClient import SimapClient +# This file overwrites default RestConf Server `app.py` file. + + +import logging +from common.tools.rest_conf.server.restconf_server.RestConfServerApplication import RestConfServerApplication +from .SimapUpdater import SimapUpdater logging.basicConfig( @@ -32,85 +29,19 @@ LOGGER = logging.getLogger(__name__) logging.getLogger('RestConfClient').setLevel(logging.WARN) -RESTCONF_PREFIX = '/restconf' -SECRET_KEY = secrets.token_hex(64) - - -YANG_SEARCH_PATH = './yang' - -ymd = YangModuleDiscoverer(YANG_SEARCH_PATH) -YANG_MODULE_NAMES = ymd.run(do_log_order=True) - -STARTUP_FILE = './startup.json' -with open(STARTUP_FILE, mode='r', encoding='UTF-8') as fp: - YANG_STARTUP_DATA = json.loads(fp.read()) - - -SIMAP_ADDRESS = os.environ.get('SIMAP_ADDRESS') -SIMAP_PORT = os.environ.get('SIMAP_PORT' ) - -if SIMAP_ADDRESS is not None and SIMAP_PORT is not None: - restconf_client = RestConfClient( - SIMAP_ADDRESS, port=SIMAP_PORT, - logger=logging.getLogger('RestConfClient') - ) - simap_client = SimapClient(restconf_client) +LOGGER.info('Starting...') +rcs_app = RestConfServerApplication() +LOGGER.info('All connectors registered') - te_topo = simap_client.network('admin') - te_topo.update() +startup_data = rcs_app.get_startup_data() - networks = YANG_STARTUP_DATA.get('ietf-network:networks', dict()) - networks = networks.get('network', list()) - assert len(networks) == 1 - network = networks[0] - assert network['network-id'] == 'admin' +networks = startup_data.get('ietf-network:networks', dict()) +networks = networks.get('network', list()) +if len(networks) == 1 and networks[0]['network-id'] == 'admin': + simap_updater = SimapUpdater() + simap_updater.upload_topology(networks[0]) - nodes = network.get('node', list()) - for node in nodes: - node_id = node['node-id'] - tp_ids = [ - tp['tp-id'] - for tp in node['ietf-network-topology:termination-point'] - ] - te_topo.node(node_id).create(termination_point_ids=tp_ids) - - links = network.get('ietf-network-topology:link', list()) - for link in links: - link_id = link['link-id'] - link_src = link['source'] - link_dst = link['destination'] - link_src_node_id = link_src['source-node'] - link_src_tp_id = link_src['source-tp'] - link_dst_node_id = link_dst['dest-node'] - link_dst_tp_id = link_dst['dest-tp'] - - te_topo.link(link_id).create( - link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id - ) - - -yang_handler = YangHandler( - YANG_SEARCH_PATH, YANG_MODULE_NAMES, YANG_STARTUP_DATA -) -restconf_paths = yang_handler.get_schema_paths() - -app = Flask(__name__) -app.config['SECRET_KEY'] = SECRET_KEY - -api = Api(app) -api.add_resource( - HostMeta, - '/.well-known/host-meta', - resource_class_args=(RESTCONF_PREFIX,) -) -api.add_resource( - RestConfDispatch, - RESTCONF_PREFIX + '/data', - RESTCONF_PREFIX + '/data/', - RESTCONF_PREFIX + '/data/', - resource_class_args=(yang_handler,) -) +rcs_app.dump_configuration() +app = 
rcs_app.get_flask_app() -LOGGER.info('Available RESTCONF paths:') -for restconf_path in sorted(restconf_paths): - LOGGER.info('- {:s}'.format(str(restconf_path))) +LOGGER.info('Initialization completed!') diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py deleted file mode 100644 index b7c057a70..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/RestConfClient.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum, logging, requests -from requests.auth import HTTPBasicAuth -from typing import Any, Dict, Optional, Set - -class RestRequestMethod(enum.Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - PATCH = 'patch' - DELETE = 'delete' - -EXPECTED_STATUS_CODES : Set[int] = { - requests.codes['OK' ], # 200 - OK - requests.codes['CREATED' ], # 201 - Created - requests.codes['ACCEPTED' ], # 202 - Accepted - requests.codes['NO_CONTENT'], # 204 - No Content -} - -def compose_basic_auth( - username : Optional[str] = None, password : Optional[str] = None -) -> Optional[HTTPBasicAuth]: - if username is None or password is None: return None - return HTTPBasicAuth(username, password) - -class SchemeEnum(enum.Enum): - HTTP = 'http' - HTTPS = 'https' - -def check_scheme(scheme : str) -> str: - str_scheme = str(scheme).lower() - enm_scheme = SchemeEnum._value2member_map_[str_scheme] - return enm_scheme.value - -HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' -RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' - -class RestConfClient: - def __init__( - self, address : str, port : int = 8080, scheme : str = 'http', - username : Optional[str] = None, password : Optional[str] = None, - timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, - logger : Optional[logging.Logger] = None - ) -> None: - self._address = address - self._port = int(port) - self._scheme = check_scheme(scheme) - self._auth = compose_basic_auth(username=username, password=password) - self._base_url = '' - self._timeout = int(timeout) - self._verify_certs = verify_certs - self._allow_redirects = allow_redirects - self._logger = logger - - self._discover_base_url() - - def _discover_base_url(self) -> None: - host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) - host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) - - links = host_meta.get('links') - if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') - if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') - if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') - - link = links[0] - if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') - - rel = link.get('rel') - if rel is None: raise AttributeError('Missing attribute 
"links[0].rel" in host-meta reply') - if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') - if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') - - href = link.get('href') - if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') - if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') - - self._base_url = str(href + '/data').replace('//', '/') - - def _log_msg_request( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - log_level : int = logging.INFO - ) -> str: - msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) - if body is not None: msg += ' body={:s}'.format(str(body)) - if self._logger is not None: self._logger.log(log_level, msg) - return msg - - def _log_msg_check_reply( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - reply : requests.Response, expected_status_codes : Set[int], - log_level : int = logging.INFO - ) -> str: - msg = 'Reply: {:s}'.format(str(reply.text)) - if self._logger is not None: self._logger.log(log_level, msg) - http_status_code = reply.status_code - if http_status_code in expected_status_codes: return msg - MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' - msg = MSG.format( - str(method.value).upper(), str(request_url), str(body), - str(http_status_code), str(reply.text) - ) - self._logger.error(msg) - raise Exception(msg) - - def _do_rest_request( - self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = EXPECTED_STATUS_CODES - ) -> Optional[Any]: - candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) - if endpoint.lower().startswith(candidate_schemes): - request_url = endpoint.lstrip('/') - else: - endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') - request_url = '{:s}://{:s}:{:d}/{:s}'.format( - self._scheme, self._address, self._port, endpoint.lstrip('/') - ) - self._log_msg_request(method, request_url, body) - try: - headers = {'accept': 'application/json'} - reply = requests.request( - method.value, request_url, headers=headers, json=body, - auth=self._auth, verify=self._verify_certs, timeout=self._timeout, - allow_redirects=self._allow_redirects - ) - except Exception as e: - MSG = 'Request failed. 
method={:s} url={:s} body={:s}' - msg = MSG.format(str(method.value).upper(), request_url, str(body)) - self._logger.exception(msg) - raise Exception(msg) from e - self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) - if reply.content and len(reply.content) > 0: return reply.json() - return None - - def get( - self, endpoint : str, - expected_status_codes : Set[int] = {requests.codes['OK']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.GET, endpoint, - expected_status_codes=expected_status_codes - ) - - def post( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.POST, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def put( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PUT, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def patch( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PATCH, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def delete( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.DELETE, endpoint, body=body, - expected_status_codes=expected_status_codes - ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py deleted file mode 100644 index 3ccc21c7d..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/tests/tools/mock_nce_t_ctrl/requirements.in b/src/tests/tools/mock_nce_t_ctrl/requirements.in deleted file mode 100644 index 17155ed58..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/requirements.in +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -cryptography==39.0.1 -eventlet==0.39.0 -Flask-HTTPAuth==4.5.0 -Flask-RESTful==0.3.9 -Flask==2.1.3 -gunicorn==23.0.0 -jsonschema==4.4.0 -libyang==2.8.4 -pyopenssl==23.0.0 -requests==2.27.1 -werkzeug==2.3.7 diff --git a/src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh b/src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh deleted file mode 100755 index 593347cb8..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/run_ctrl_gunicorn.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - -export FLASK_ENV=development -gunicorn -w 1 --worker-class eventlet -b 0.0.0.0:8080 --log-level DEBUG nce_t_ctrl.app:app diff --git a/src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh b/src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh deleted file mode 100755 index 9b47a3e21..000000000 --- a/src/tests/tools/mock_nce_t_ctrl/run_ctrl_standalone.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - -python -m nce_t_ctrl -- GitLab From 3c7048fdf64ee5068e8b1827fb085426a1b04f13 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:06:09 +0000 Subject: [PATCH 175/367] SIMAP Connector: - Upgraded logging logic - Skip Slice events --- .../service/simap_updater/SimapUpdater.py | 63 +++++++++++-------- 1 file changed, 38 insertions(+), 25 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 2322be3b6..61c1f9637 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -17,7 +17,7 @@ import logging, queue, threading, uuid from typing import Any, Optional, Set from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, TopologyEvent +from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string @@ -83,7 +83,12 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.debug(MSG.format(grpc_message_to_json_string(context_event))) - def _dispatch_topology_set(self, topology_event : TopologyEvent) -> None: + def dispatch_slice(self, slice_event : SliceEvent) -> None: + MSG = 'Skipping Slice Event: {:s}' + LOGGER.debug(MSG.format(grpc_message_to_json_string(slice_event))) + + + def _dispatch_topology_set(self, topology_event : TopologyEvent) -> bool: MSG = 'Processing Topology Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -100,17 +105,18 @@ class EventDispatcher(BaseEventDispatcher): self._simap_client.network(topology_name).update( supporting_network_ids=supporting_network_ids ) + return True def dispatch_topology_create(self, topology_event : TopologyEvent) -> None: - self._dispatch_topology_set(topology_event) + if not self._dispatch_topology_set(topology_event): return MSG = 'Topology Create: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) def dispatch_topology_update(self, topology_event : TopologyEvent) -> None: - self._dispatch_topology_set(topology_event) + if not self._dispatch_topology_set(topology_event): return MSG = 'Topology Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) @@ -132,7 +138,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info(MSG.format(grpc_message_to_json_string(topology_event))) - def _dispatch_device_set(self, device_event : DeviceEvent) -> None: + def _dispatch_device_set(self, device_event : DeviceEvent) -> bool: MSG = 'Processing Device Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -149,7 +155,7 @@ class EventDispatcher(BaseEventDispatcher): str_device_event = grpc_message_to_json_string(device_event) str_device = grpc_message_to_json_string(device) LOGGER.warning(MSG.format(str_device_event, str_device)) - return + return False #device_controller_uuid = device.controller_id.device_uuid.uuid #if len(device_controller_uuid) > 0: @@ -170,7 +176,7 @@ class EventDispatcher(BaseEventDispatcher): str_device_event = grpc_message_to_json_string(device_event) 
str_device = grpc_message_to_json_string(device) LOGGER.warning(MSG.format(str_device_event, str_device)) - return + return False topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name @@ -186,17 +192,18 @@ class EventDispatcher(BaseEventDispatcher): te_device.termination_point(endpoint_name).update() #self._remove_skipped_device(device) + return True def dispatch_device_create(self, device_event : DeviceEvent) -> None: - self._dispatch_device_set(device_event) + if not self._dispatch_device_set(device_event): return MSG = 'Device Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) def dispatch_device_update(self, device_event : DeviceEvent) -> None: - self._dispatch_device_set(device_event) + if not self._dispatch_device_set(device_event): return MSG = 'Device Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) @@ -264,7 +271,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) - def _dispatch_link_set(self, link_event : LinkEvent) -> None: + def _dispatch_link_set(self, link_event : LinkEvent) -> bool: MSG = 'Processing Link Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -278,7 +285,7 @@ class EventDispatcher(BaseEventDispatcher): str_link_event = grpc_message_to_json_string(link_event) str_link = grpc_message_to_json_string(link) LOGGER.warning(MSG.format(str_link_event, str_link)) - return + return False topology = self._object_cache.get(CachedEntities.TOPOLOGY, topology_uuid) topology_name = topology.name @@ -298,7 +305,7 @@ class EventDispatcher(BaseEventDispatcher): str_link_event = grpc_message_to_json_string(link_event) str_link = grpc_message_to_json_string(link) LOGGER.warning(MSG.format(str_link_event, str_link)) - return + return False # Skip links that connect to devices previously marked as skipped src_uuid = src_device.device_id.device_uuid.uuid @@ -311,7 +318,7 @@ class EventDispatcher(BaseEventDispatcher): str_link_event = grpc_message_to_json_string(link_event) str_link = grpc_message_to_json_string(link) LOGGER.warning(MSG.format(str_link_event, str_link)) - return + return False try: if src_device is None: @@ -332,17 +339,18 @@ class EventDispatcher(BaseEventDispatcher): te_link = te_topo.link(link_name) te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) + return True def dispatch_link_create(self, link_event : LinkEvent) -> None: - self._dispatch_link_set(link_event) + if not self._dispatch_link_set(link_event): return MSG = 'Link Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) def dispatch_link_update(self, link_event : LinkEvent) -> None: - self._dispatch_link_set(link_event) + if not self._dispatch_link_set(link_event): return MSG = 'Link Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -400,7 +408,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) - def _dispatch_service_set(self, service_event : ServiceEvent) -> None: + def _dispatch_service_set(self, service_event : ServiceEvent) -> bool: MSG = 'Processing Service Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) @@ -411,7 +419,11 @@ class EventDispatcher(BaseEventDispatcher): try: uuid.UUID(hex=service_name) # skip it if properly parsed, means it is a service with a UUID-based name, i.e., a sub-service - return + MSG 
= 'ServiceEvent({:s}) skipped, it is a subservice: {:s}' + str_service_event = grpc_message_to_json_string(service_event) + str_service = grpc_message_to_json_string(service) + LOGGER.warning(MSG.format(str_service_event, str_service)) + return False except: # pylint: disable=bare-except pass @@ -422,14 +434,14 @@ class EventDispatcher(BaseEventDispatcher): str_service_event = grpc_message_to_json_string(service_event) str_service = grpc_message_to_json_string(service) LOGGER.warning(MSG.format(str_service_event, str_service)) - return + return False if len(endpoint_uuids) < 2: MSG = 'ServiceEvent({:s}) skipped, not enough endpoint_ids to compose link: {:s}' str_service_event = grpc_message_to_json_string(service_event) str_service = grpc_message_to_json_string(service) LOGGER.warning(MSG.format(str_service_event, str_service)) - return + return False topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) topology_names = {t.name for t in topologies} @@ -438,9 +450,9 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' str_service_event = grpc_message_to_json_string(service_event) LOGGER.warning(MSG.format(str_service_event)) - return - domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net + return False + domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net domain_topo = self._simap_client.network(domain_name) domain_topo.update() @@ -490,17 +502,18 @@ class EventDispatcher(BaseEventDispatcher): ) dom_link = domain_topo.link(link_name) dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) + return True - def dispatch_service_created(self, service_event : ServiceEvent) -> None: - self._dispatch_service_set(service_event) + def dispatch_service_create(self, service_event : ServiceEvent) -> None: + if not self._dispatch_service_set(service_event): return MSG = 'Logical Link Created for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) def dispatch_service_update(self, service_event : ServiceEvent) -> None: - self._dispatch_service_set(service_event) + if not self._dispatch_service_set(service_event): return MSG = 'Logical Link Updated for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) @@ -530,8 +543,8 @@ class EventDispatcher(BaseEventDispatcher): str_service_event = grpc_message_to_json_string(service_event) LOGGER.warning(MSG.format(str_service_event)) return - domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net + domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net domain_topo = self._simap_client.network(domain_name) domain_topo.update() -- GitLab From faa343b5792855799a33932c53fbee825533454a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:11:16 +0000 Subject: [PATCH 176/367] Tests - Tools - Mock NCE-FAN / NCE-T Controllers - Fixed Dockerfiles --- src/tests/tools/mock_nce_fan_ctrl/Dockerfile | 1 - src/tests/tools/mock_nce_t_ctrl/Dockerfile | 1 - 2 files changed, 2 deletions(-) diff --git a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile index cae06e98a..0a26cc096 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile @@ -52,7 +52,6 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory COPY src/common/tools/rest_conf/server/restconf_server/ ./nce_fan_ctrl/ COPY 
src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/*.py ./nce_fan_ctrl/ -COPY src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/simap_client/*.py ./nce_fan_ctrl/simap_client/ COPY src/tests/tools/mock_nce_fan_ctrl/yang/. ./yang/ COPY src/tests/tools/mock_nce_fan_ctrl/startup.json ./startup.json diff --git a/src/tests/tools/mock_nce_t_ctrl/Dockerfile b/src/tests/tools/mock_nce_t_ctrl/Dockerfile index a33510928..0c99f35c3 100644 --- a/src/tests/tools/mock_nce_t_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_t_ctrl/Dockerfile @@ -52,7 +52,6 @@ RUN python3 -m pip install -r requirements.txt # Add component files into working directory COPY src/common/tools/rest_conf/server/restconf_server/ ./nce_t_ctrl/ COPY src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/*.py ./nce_t_ctrl/ -COPY src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/simap_client/*.py ./nce_t_ctrl/simap_client/ COPY src/tests/tools/mock_nce_t_ctrl/yang/. ./yang/ COPY src/tests/tools/mock_nce_t_ctrl/startup.json ./startup.json -- GitLab From 695221907503f2d15f3ef5cd6e17ef525a56d72e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:12:02 +0000 Subject: [PATCH 177/367] ECOC F5GA Telemetry Demo: - Fixed build of mocks --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index fd798ed4c..66cd43d0b 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -24,16 +24,16 @@ echo "Deploying in ${HOSTNAME}..." case "$HOSTNAME" in simap-server) echo "Building SIMAP Server..." - cd ~/tfs-ctrl/src/tests/tools/simap_server - docker buildx build -t simap-server:mock -f Dockerfile . + cd ~/tfs-ctrl/ + docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . echo "Building NCE-FAN Controller..." - cd ~/tfs-ctrl/src/tests/tools/mock_nce_fan_ctrl - docker buildx build -t nce-fan-ctrl:mock -f Dockerfile . + cd ~/tfs-ctrl/ + docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . echo "Building NCE-T Controller..." - cd ~/tfs-ctrl/src/tests/tools/mock_nce_t_ctrl - docker buildx build -t nce-t-ctrl:mock -f Dockerfile . + cd ~/tfs-ctrl/ + docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_t_fan_ctrl/Dockerfile . echo "Cleaning up..." docker rm --force simap-server -- GitLab From 4b1643e42a4559a688f05163d843256a519ab62c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:35:30 +0000 Subject: [PATCH 178/367] ECOC F5GA Telemetry Demo: - Fixed build of mocks --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index 66cd43d0b..ba942cb01 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -33,7 +33,7 @@ case "$HOSTNAME" in echo "Building NCE-T Controller..." cd ~/tfs-ctrl/ - docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_t_fan_ctrl/Dockerfile . + docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . echo "Cleaning up..." 
docker rm --force simap-server -- GitLab From 5c185cadbb6f7413bee0b803de7edb996490c4bf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:35:48 +0000 Subject: [PATCH 179/367] Tests - Tools - Mock NCE-FAN / NCE-T Controllers - Fixed Dockerfiles - Fixed SimapUpdater logic --- src/tests/tools/mock_nce_fan_ctrl/Dockerfile | 13 ++++++++----- .../mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py | 4 ++++ src/tests/tools/mock_nce_t_ctrl/Dockerfile | 13 ++++++++----- .../mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py | 4 ++++ 4 files changed, 24 insertions(+), 10 deletions(-) diff --git a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile index 0a26cc096..b0a3da4e3 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile @@ -42,15 +42,18 @@ RUN python3 -m pip install --upgrade pip RUN python3 -m pip install --upgrade setuptools wheel RUN python3 -m pip install --upgrade pip-tools -# Create component sub-folders, get specific Python packages -RUN mkdir -p /var/teraflow/nce_fan_ctrl/ -WORKDIR /var/teraflow/nce_fan_ctrl/ +# Get specific Python packages +RUN mkdir -p /var/teraflow/ +WORKDIR /var/teraflow/ COPY src/common/tools/rest_conf/server/requirements.in ./requirements.in RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt -# Add component files into working directory -COPY src/common/tools/rest_conf/server/restconf_server/ ./nce_fan_ctrl/ +# Get component files +RUN mkdir -p /var/teraflow/common/tools/ +WORKDIR /var/teraflow/ +COPY src/common/tools/rest_api/ ./common/tools/rest_api/ +COPY src/common/tools/rest_conf/ ./common/tools/rest_conf/ COPY src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/*.py ./nce_fan_ctrl/ COPY src/tests/tools/mock_nce_fan_ctrl/yang/. 
./yang/ COPY src/tests/tools/mock_nce_fan_ctrl/startup.json ./startup.json diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py index 4d8cff662..cce0179c2 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py @@ -25,6 +25,8 @@ SIMAP_PORT = os.environ.get('SIMAP_PORT' ) class SimapUpdater: def __init__(self): + self._simap_client = None + if SIMAP_ADDRESS is None: return if SIMAP_PORT is None: return @@ -36,6 +38,8 @@ class SimapUpdater: def upload_topology(self, network_data : Dict) -> None: + if self._simap_client is None: return + network_id = network_data['network-id'] te_topo = self._simap_client.network(network_id) te_topo.update() diff --git a/src/tests/tools/mock_nce_t_ctrl/Dockerfile b/src/tests/tools/mock_nce_t_ctrl/Dockerfile index 0c99f35c3..60f7a7b06 100644 --- a/src/tests/tools/mock_nce_t_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_t_ctrl/Dockerfile @@ -42,15 +42,18 @@ RUN python3 -m pip install --upgrade pip RUN python3 -m pip install --upgrade setuptools wheel RUN python3 -m pip install --upgrade pip-tools -# Create component sub-folders, get specific Python packages -RUN mkdir -p /var/teraflow/nce_t_ctrl/ -WORKDIR /var/teraflow/nce_t_ctrl/ +# Get specific Python packages +RUN mkdir -p /var/teraflow/ +WORKDIR /var/teraflow/ COPY src/common/tools/rest_conf/server/requirements.in ./requirements.in RUN pip-compile --quiet --output-file=requirements.txt requirements.in RUN python3 -m pip install -r requirements.txt -# Add component files into working directory -COPY src/common/tools/rest_conf/server/restconf_server/ ./nce_t_ctrl/ +# Get component files +RUN mkdir -p /var/teraflow/common/tools/ +WORKDIR /var/teraflow/ +COPY src/common/tools/rest_api/ ./common/tools/rest_api/ +COPY src/common/tools/rest_conf/ ./common/tools/rest_conf/ COPY src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/*.py ./nce_t_ctrl/ COPY src/tests/tools/mock_nce_t_ctrl/yang/. 
./yang/ COPY src/tests/tools/mock_nce_t_ctrl/startup.json ./startup.json diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py index 4d8cff662..cce0179c2 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py @@ -25,6 +25,8 @@ SIMAP_PORT = os.environ.get('SIMAP_PORT' ) class SimapUpdater: def __init__(self): + self._simap_client = None + if SIMAP_ADDRESS is None: return if SIMAP_PORT is None: return @@ -36,6 +38,8 @@ class SimapUpdater: def upload_topology(self, network_data : Dict) -> None: + if self._simap_client is None: return + network_id = network_data['network-id'] te_topo = self._simap_client.network(network_id) te_topo.update() -- GitLab From 6bf02ba4d5d2712f2b33ea0f2a934649c5d29af2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:42:54 +0000 Subject: [PATCH 180/367] Tests - Tools - Mock NCE-FAN / NCE-T Controllers - Fixed registration of endpoints --- src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py | 2 ++ src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py index 2bd587ce8..0b7648466 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py @@ -31,6 +31,8 @@ logging.getLogger('RestConfClient').setLevel(logging.WARN) LOGGER.info('Starting...') rcs_app = RestConfServerApplication() +rcs_app.register_host_meta() +rcs_app.register_restconf() LOGGER.info('All connectors registered') startup_data = rcs_app.get_startup_data() diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 2bd587ce8..0b7648466 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -31,6 +31,8 @@ logging.getLogger('RestConfClient').setLevel(logging.WARN) LOGGER.info('Starting...') rcs_app = RestConfServerApplication() +rcs_app.register_host_meta() +rcs_app.register_restconf() LOGGER.info('All connectors registered') startup_data = rcs_app.get_startup_data() -- GitLab From 19541fd324d687c1aa365776f0872b00eb80e817 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 13:43:19 +0000 Subject: [PATCH 181/367] SIMAP Connector: - Updated SimapClient --- .../simap_client/RestConfClient.py | 191 ------------------ .../simap_updater/simap_client/SimapClient.py | 16 +- 2 files changed, 8 insertions(+), 199 deletions(-) delete mode 100644 src/simap_connector/service/simap_updater/simap_client/RestConfClient.py diff --git a/src/simap_connector/service/simap_updater/simap_client/RestConfClient.py b/src/simap_connector/service/simap_updater/simap_client/RestConfClient.py deleted file mode 100644 index b7c057a70..000000000 --- a/src/simap_connector/service/simap_updater/simap_client/RestConfClient.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum, logging, requests -from requests.auth import HTTPBasicAuth -from typing import Any, Dict, Optional, Set - -class RestRequestMethod(enum.Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - PATCH = 'patch' - DELETE = 'delete' - -EXPECTED_STATUS_CODES : Set[int] = { - requests.codes['OK' ], # 200 - OK - requests.codes['CREATED' ], # 201 - Created - requests.codes['ACCEPTED' ], # 202 - Accepted - requests.codes['NO_CONTENT'], # 204 - No Content -} - -def compose_basic_auth( - username : Optional[str] = None, password : Optional[str] = None -) -> Optional[HTTPBasicAuth]: - if username is None or password is None: return None - return HTTPBasicAuth(username, password) - -class SchemeEnum(enum.Enum): - HTTP = 'http' - HTTPS = 'https' - -def check_scheme(scheme : str) -> str: - str_scheme = str(scheme).lower() - enm_scheme = SchemeEnum._value2member_map_[str_scheme] - return enm_scheme.value - -HOST_META_URL = '{:s}://{:s}:{:d}/.well-known/host-meta' -RESTCONF_URL = '{:s}://{:s}:{:d}/{:s}' - -class RestConfClient: - def __init__( - self, address : str, port : int = 8080, scheme : str = 'http', - username : Optional[str] = None, password : Optional[str] = None, - timeout : int = 10, verify_certs : bool = True, allow_redirects : bool = True, - logger : Optional[logging.Logger] = None - ) -> None: - self._address = address - self._port = int(port) - self._scheme = check_scheme(scheme) - self._auth = compose_basic_auth(username=username, password=password) - self._base_url = '' - self._timeout = int(timeout) - self._verify_certs = verify_certs - self._allow_redirects = allow_redirects - self._logger = logger - - self._discover_base_url() - - def _discover_base_url(self) -> None: - host_meta_url = HOST_META_URL.format(self._scheme, self._address, self._port) - host_meta : Dict = self.get(host_meta_url, expected_status_codes={requests.codes['OK']}) - - links = host_meta.get('links') - if links is None: raise AttributeError('Missing attribute "links" in host-meta reply') - if not isinstance(links, list): raise AttributeError('Attribute "links" must be a list') - if len(links) != 1: raise AttributeError('Attribute "links" is expected to have exactly 1 item') - - link = links[0] - if not isinstance(link, dict): raise AttributeError('Attribute "links[0]" must be a dict') - - rel = link.get('rel') - if rel is None: raise AttributeError('Missing attribute "links[0].rel" in host-meta reply') - if not isinstance(rel, str): raise AttributeError('Attribute "links[0].rel" must be a str') - if rel != 'restconf': raise AttributeError('Attribute "links[0].rel" != "restconf"') - - href = link.get('href') - if href is None: raise AttributeError('Missing attribute "links[0]" in host-meta reply') - if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') - - self._base_url = str(href + '/data').replace('//', '/') - - def _log_msg_request( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - log_level : int = logging.INFO - ) -> str: - msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) - if body is not None: 
msg += ' body={:s}'.format(str(body)) - if self._logger is not None: self._logger.log(log_level, msg) - return msg - - def _log_msg_check_reply( - self, method : RestRequestMethod, request_url : str, body : Optional[Any], - reply : requests.Response, expected_status_codes : Set[int], - log_level : int = logging.INFO - ) -> str: - msg = 'Reply: {:s}'.format(str(reply.text)) - if self._logger is not None: self._logger.log(log_level, msg) - http_status_code = reply.status_code - if http_status_code in expected_status_codes: return msg - MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}' - msg = MSG.format( - str(method.value).upper(), str(request_url), str(body), - str(http_status_code), str(reply.text) - ) - self._logger.error(msg) - raise Exception(msg) - - def _do_rest_request( - self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = EXPECTED_STATUS_CODES - ) -> Optional[Any]: - candidate_schemes = tuple(['{:s}://'.format(m).lower() for m in SchemeEnum.__members__.keys()]) - if endpoint.lower().startswith(candidate_schemes): - request_url = endpoint.lstrip('/') - else: - endpoint = str(self._base_url + '/' + endpoint).replace('//', '/').lstrip('/') - request_url = '{:s}://{:s}:{:d}/{:s}'.format( - self._scheme, self._address, self._port, endpoint.lstrip('/') - ) - self._log_msg_request(method, request_url, body) - try: - headers = {'accept': 'application/json'} - reply = requests.request( - method.value, request_url, headers=headers, json=body, - auth=self._auth, verify=self._verify_certs, timeout=self._timeout, - allow_redirects=self._allow_redirects - ) - except Exception as e: - MSG = 'Request failed. method={:s} url={:s} body={:s}' - msg = MSG.format(str(method.value).upper(), request_url, str(body)) - self._logger.exception(msg) - raise Exception(msg) from e - self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes) - if reply.content and len(reply.content) > 0: return reply.json() - return None - - def get( - self, endpoint : str, - expected_status_codes : Set[int] = {requests.codes['OK']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.GET, endpoint, - expected_status_codes=expected_status_codes - ) - - def post( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.POST, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def put( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED'], requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PUT, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def patch( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.PATCH, endpoint, body=body, - expected_status_codes=expected_status_codes - ) - - def delete( - self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['NO_CONTENT']} - ) -> Optional[Any]: - return self._do_rest_request( - RestRequestMethod.DELETE, endpoint, body=body, - expected_status_codes=expected_status_codes - ) diff --git a/src/simap_connector/service/simap_updater/simap_client/SimapClient.py 
b/src/simap_connector/service/simap_updater/simap_client/SimapClient.py index b4c27d43a..8f457d452 100644 --- a/src/simap_connector/service/simap_updater/simap_client/SimapClient.py +++ b/src/simap_connector/service/simap_updater/simap_client/SimapClient.py @@ -14,12 +14,12 @@ from typing import Dict, List, Tuple -from .RestConfClient import RestConfClient +from common.tools.rest_conf.client.RestConfClient import RestConfClient class TerminationPoint: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]/node[node-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point[tp-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}/node={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): self._restconf_client = restconf_client @@ -63,8 +63,8 @@ class TerminationPoint: self._restconf_client.delete(endpoint) class Node: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/node[node-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -120,8 +120,8 @@ class Node: self._restconf_client.delete(endpoint) class Link: - ENDPOINT_NO_ID = '/ietf-network:networks/network[network-id="{:s}"]' - ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link[link-id="{:s}"]' + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client @@ -172,7 +172,7 @@ class Link: class Network: ENDPOINT_NO_ID = '/ietf-network:networks' - ENDPOINT_ID = ENDPOINT_NO_ID + '/network[network-id="{:s}"]' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str): self._restconf_client = restconf_client -- GitLab From 642f4202d48a1b2224a7ee591cb5a823ff1b1589 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 15:16:43 +0000 Subject: [PATCH 182/367] SIMAP Connector: - Fixed imports --- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 61c1f9637..8b42640d6 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -21,12 +21,12 @@ from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.rest_conf.client.RestConfClient import RestConfClient from context.client.ContextClient import ContextClient from simap_connector.Config import ( SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) -from .simap_client.RestConfClient import RestConfClient from .simap_client.SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache from .Tools import get_device_endpoint, 
get_link_endpoint, get_service_endpoint -- GitLab From df9f3483a6f94826f5365d4e10a5f580e9c559cd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 16:27:41 +0000 Subject: [PATCH 183/367] SIMAP Connector: - Implemented provisional mocks for testing - Code styling --- .../service/simap_updater/MockSimaps.py | 143 +++++++++++ .../{simap_client => }/SimapClient.py | 0 .../service/simap_updater/SimapUpdater.py | 222 ++++++++++-------- .../simap_updater/simap_client/__init__.py | 14 -- .../simap_updater/simap_client/__main__.py | 128 ---------- 5 files changed, 263 insertions(+), 244 deletions(-) create mode 100644 src/simap_connector/service/simap_updater/MockSimaps.py rename src/simap_connector/service/simap_updater/{simap_client => }/SimapClient.py (100%) delete mode 100644 src/simap_connector/service/simap_updater/simap_client/__init__.py delete mode 100644 src/simap_connector/service/simap_updater/simap_client/__main__.py diff --git a/src/simap_connector/service/simap_updater/MockSimaps.py b/src/simap_connector/service/simap_updater/MockSimaps.py new file mode 100644 index 000000000..6b6fe62ee --- /dev/null +++ b/src/simap_connector/service/simap_updater/MockSimaps.py @@ -0,0 +1,143 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging +from .SimapClient import SimapClient + + +LOGGER = logging.getLogger(__name__) + + +def set_simap_e2e_net(simap_client : SimapClient) -> None: + simap = simap_client.network('simap-e2e') + simap.create(supporting_network_ids=['admin', 'simap-aggnet']) + + node_a = simap.node('sdp1') + node_a.create(supporting_node_ids=[('admin', 'ONT1')]) + node_a.termination_point('200').create(supporting_termination_point_ids=[('admin', 'ONT1', '200')]) + node_a.termination_point('500').create(supporting_termination_point_ids=[('admin', 'ONT1', '500')]) + + node_b = simap.node('sdp2') + node_b.create(supporting_node_ids=[('admin', 'POP2')]) + node_b.termination_point('200').create(supporting_termination_point_ids=[('admin', 'POP2', '200')]) + node_b.termination_point('201').create(supporting_termination_point_ids=[('admin', 'POP2', '201')]) + node_b.termination_point('500').create(supporting_termination_point_ids=[('admin', 'POP2', '500')]) + + link = simap.link('E2E-L1') + link.create( + 'sdp1', '500', 'sdp2', '500', + supporting_link_ids=[ + ('admin', 'L1'), ('admin', 'L3'), ('simap-aggnet', 'AggNet-L1') + ] + ) + + +def delete_simap_e2e_net(simap_client : SimapClient) -> None: + simap = simap_client.network('simap-e2e') + simap.create(supporting_network_ids=['admin', 'simap-aggnet']) + + link = simap.link('E2E-L1') + link.delete() + + +def set_simap_agg_net(simap_client : SimapClient) -> None: + simap = simap_client.network('simap-aggnet') + simap.create(supporting_network_ids=['admin', 'simap-trans']) + + node_a = simap.node('sdp1') + node_a.create(supporting_node_ids=[('admin', 'OLT')]) + node_a.termination_point('200').create(supporting_termination_point_ids=[('admin', 'OLT', '200')]) + node_a.termination_point('201').create(supporting_termination_point_ids=[('admin', 'OLT', '201')]) + node_a.termination_point('500').create(supporting_termination_point_ids=[('admin', 'OLT', '500')]) + node_a.termination_point('501').create(supporting_termination_point_ids=[('admin', 'OLT', '501')]) + + node_b = simap.node('sdp2') + node_b.create(supporting_node_ids=[('admin', 'POP2')]) + node_b.termination_point('200').create(supporting_termination_point_ids=[('admin', 'POP2', '200')]) + node_b.termination_point('201').create(supporting_termination_point_ids=[('admin', 'POP2', '201')]) + node_b.termination_point('500').create(supporting_termination_point_ids=[('admin', 'POP2', '500')]) + + link = simap.link('AggNet-L1') + link.create( + 'sdp1', '500', 'sdp2', '500', + supporting_link_ids=[ + ('simap-trans-pkt', 'Trans-L1'), ('admin', 'L13') + ] + ) + + +def delete_simap_agg_net(simap_client : SimapClient) -> None: + simap = simap_client.network('simap-aggnet') + simap.create(supporting_network_ids=['admin', 'simap-trans']) + + link = simap.link('AggNet-L1') + link.delete() + + +def set_simap_trans_pkt(simap_client : SimapClient) -> None: + simap = simap_client.network('simap-trans-pkt') + simap.update(supporting_network_ids=['admin']) + + node_a = simap.node('site1') + node_a.update(supporting_node_ids=[('admin', 'P-PE1')]) + node_a.termination_point('200').update(supporting_termination_point_ids=[('admin', 'P-PE1', '200')]) + node_a.termination_point('500').update(supporting_termination_point_ids=[('admin', 'P-PE1', '500')]) + node_a.termination_point('501').update(supporting_termination_point_ids=[('admin', 'P-PE1', '501')]) + + node_b = simap.node('site2') + node_b.update(supporting_node_ids=[('admin', 'P-PE2')]) + 
node_b.termination_point('200').update(supporting_termination_point_ids=[('admin', 'P-PE2', '200')]) + node_b.termination_point('500').update(supporting_termination_point_ids=[('admin', 'P-PE2', '500')]) + node_b.termination_point('501').update(supporting_termination_point_ids=[('admin', 'P-PE2', '501')]) + + link = simap.link('Trans-L1') + link.update( + 'site1', '500', 'site2', '500', + supporting_link_ids=[ + ('admin', 'L5'), ('admin', 'L9') + ] + ) + + +def delete_simap_trans_pkt(simap_client : SimapClient) -> None: + simap = simap_client.network('simap-trans-pkt') + simap.update(supporting_network_ids=['admin']) + + link = simap.link('Trans-L1') + link.delete() + + +def set_mock_simap(simap_client : SimapClient, domain_name : str) -> None: + if domain_name == 'trans-pkt': + set_simap_trans_pkt(simap_client) + elif domain_name == 'agg-net': + set_simap_agg_net(simap_client) + elif domain_name == 'e2e-net': + set_simap_e2e_net(simap_client) + else: + MSG = 'Unsupported Topology({:s}) to set SIMAP' + LOGGER.warning(MSG.format(str(domain_name))) + + +def delete_mock_simap(simap_client : SimapClient, domain_name : str) -> None: + if domain_name == 'trans-pkt': + delete_simap_trans_pkt(simap_client) + elif domain_name == 'agg-net': + delete_simap_agg_net(simap_client) + elif domain_name == 'e2e-net': + delete_simap_e2e_net(simap_client) + else: + MSG = 'Unsupported Topology({:s}) to delete SIMAP' + LOGGER.warning(MSG.format(str(domain_name))) diff --git a/src/simap_connector/service/simap_updater/simap_client/SimapClient.py b/src/simap_connector/service/simap_updater/SimapClient.py similarity index 100% rename from src/simap_connector/service/simap_updater/simap_client/SimapClient.py rename to src/simap_connector/service/simap_updater/SimapClient.py diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 8b42640d6..5731a9fa5 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -17,7 +17,9 @@ import logging, queue, threading, uuid from typing import Any, Optional, Set from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent +from common.proto.context_pb2 import ( + ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, SliceEvent, TopologyEvent +) from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string @@ -27,7 +29,8 @@ from simap_connector.Config import ( SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) -from .simap_client.SimapClient import SimapClient +from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap +from .SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache from .Tools import get_device_endpoint, get_link_endpoint, get_service_endpoint @@ -427,21 +430,21 @@ class EventDispatcher(BaseEventDispatcher): except: # pylint: disable=bare-except pass - topology_uuid, endpoint_uuids = get_service_endpoint(service) + #topology_uuid, endpoint_uuids = get_service_endpoint(service) - if topology_uuid is None: - MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' - 
str_service_event = grpc_message_to_json_string(service_event) - str_service = grpc_message_to_json_string(service) - LOGGER.warning(MSG.format(str_service_event, str_service)) - return False + #if topology_uuid is None: + # MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' + # str_service_event = grpc_message_to_json_string(service_event) + # str_service = grpc_message_to_json_string(service) + # LOGGER.warning(MSG.format(str_service_event, str_service)) + # return False - if len(endpoint_uuids) < 2: - MSG = 'ServiceEvent({:s}) skipped, not enough endpoint_ids to compose link: {:s}' - str_service_event = grpc_message_to_json_string(service_event) - str_service = grpc_message_to_json_string(service) - LOGGER.warning(MSG.format(str_service_event, str_service)) - return False + #if len(endpoint_uuids) < 2: + # MSG = 'ServiceEvent({:s}) skipped, not enough endpoint_ids to compose link: {:s}' + # str_service_event = grpc_message_to_json_string(service_event) + # str_service = grpc_message_to_json_string(service) + # LOGGER.warning(MSG.format(str_service_event, str_service)) + # return False topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) topology_names = {t.name for t in topologies} @@ -453,55 +456,57 @@ class EventDispatcher(BaseEventDispatcher): return False domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net - domain_topo = self._simap_client.network(domain_name) - domain_topo.update() - - src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) - src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) - dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) - dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) - - try: - if src_device is None: - MSG = 'Device({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[0][0]))) - if src_endpoint is None: - MSG = 'Endpoint({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[0]))) - if dst_device is None: - MSG = 'Device({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[1][0]))) - if dst_endpoint is None: - MSG = 'Endpoint({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[1]))) - except Exception as e: - MSG = '{:s} in Service({:s})' - raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e - - src_dev_name = src_device.name - src_ep_name = src_endpoint.name - dst_dev_name = dst_device.name - dst_ep_name = dst_endpoint.name - - parent_domain_name = DEFAULT_TOPOLOGY_NAME # TODO: compute from service settings - - site_1_name = 'site1' # TODO: compute from service settings - site_1 = domain_topo.node(site_1_name) - site_1.update(supporting_node_ids=[(parent_domain_name, src_dev_name)]) - site_1_tp = site_1.termination_point(src_ep_name) - site_1_tp.update(supporting_termination_point_ids=[(parent_domain_name, src_dev_name, src_ep_name)]) - - site_2_name = 'site2' # TODO: compute from service settings - site_2 = domain_topo.node(site_2_name) - site_2.update(supporting_node_ids=[(parent_domain_name, dst_dev_name)]) - site_2_tp = site_2.termination_point(dst_ep_name) - site_2_tp.update(supporting_termination_point_ids=[(parent_domain_name, dst_dev_name, dst_ep_name)]) - - link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( - service_name, src_dev_name, src_ep_name, dst_dev_name, 
dst_ep_name - ) - dom_link = domain_topo.link(link_name) - dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) + set_mock_simap(self._simap_client, domain_name) + + #domain_topo = self._simap_client.network(domain_name) + #domain_topo.update() + + #src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + #src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + #dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + #dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + #try: + # if src_device is None: + # MSG = 'Device({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[0][0]))) + # if src_endpoint is None: + # MSG = 'Endpoint({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[0]))) + # if dst_device is None: + # MSG = 'Device({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[1][0]))) + # if dst_endpoint is None: + # MSG = 'Endpoint({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[1]))) + #except Exception as e: + # MSG = '{:s} in Service({:s})' + # raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e + + #src_dev_name = src_device.name + #src_ep_name = src_endpoint.name + #dst_dev_name = dst_device.name + #dst_ep_name = dst_endpoint.name + + #parent_domain_name = DEFAULT_TOPOLOGY_NAME # TODO: compute from service settings + + #site_1_name = 'site1' # TODO: compute from service settings + #site_1 = domain_topo.node(site_1_name) + #site_1.update(supporting_node_ids=[(parent_domain_name, src_dev_name)]) + #site_1_tp = site_1.termination_point(src_ep_name) + #site_1_tp.update(supporting_termination_point_ids=[(parent_domain_name, src_dev_name, src_ep_name)]) + + #site_2_name = 'site2' # TODO: compute from service settings + #site_2 = domain_topo.node(site_2_name) + #site_2.update(supporting_node_ids=[(parent_domain_name, dst_dev_name)]) + #site_2_tp = site_2.termination_point(dst_ep_name) + #site_2_tp.update(supporting_termination_point_ids=[(parent_domain_name, dst_dev_name, dst_ep_name)]) + + #link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( + # service_name, src_dev_name, src_ep_name, dst_dev_name, dst_ep_name + #) + #dom_link = domain_topo.link(link_name) + #dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) return True @@ -527,6 +532,17 @@ class EventDispatcher(BaseEventDispatcher): service = self._object_cache.get(CachedEntities.SERVICE, service_uuid) service_name = service.name + try: + uuid.UUID(hex=service_name) + # skip it if properly parsed, means it is a service with a UUID-based name, i.e., a sub-service + MSG = 'ServiceEvent({:s}) skipped, it is a subservice: {:s}' + str_service_event = grpc_message_to_json_string(service_event) + str_service = grpc_message_to_json_string(service) + LOGGER.warning(MSG.format(str_service_event, str_service)) + return False + except: # pylint: disable=bare-except + pass + topology_uuid, endpoint_uuids = get_service_endpoint(service) if topology_uuid is None: MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' @@ -545,44 +561,46 @@ class EventDispatcher(BaseEventDispatcher): return domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net - domain_topo = self._simap_client.network(domain_name) - domain_topo.update() - - src_device = 
self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) - src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) - dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) - dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) - - try: - if src_device is None: - MSG = 'Device({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[0][0]))) - if src_endpoint is None: - MSG = 'Endpoint({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[0]))) - if dst_device is None: - MSG = 'Device({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[1][0]))) - if dst_endpoint is None: - MSG = 'Endpoint({:s}) not found in cache' - raise Exception(MSG.format(str(endpoint_uuids[1]))) - except Exception as e: - MSG = '{:s} in Service({:s})' - raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e - - src_dev_name = src_device.name - src_ep_name = src_endpoint.name - dst_dev_name = dst_device.name - dst_ep_name = dst_endpoint.name - - link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( - service_name, src_dev_name, src_ep_name, dst_dev_name, dst_ep_name - ) - te_link = domain_topo.link(link_name) - te_link.delete() - - self._object_cache.delete(CachedEntities.SERVICE, service_uuid) - self._object_cache.delete(CachedEntities.SERVICE, service_name) + delete_mock_simap(self._simap_client, domain_name) + + #domain_topo = self._simap_client.network(domain_name) + #domain_topo.update() + + #src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + #src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) + #dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + #dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) + + #try: + # if src_device is None: + # MSG = 'Device({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[0][0]))) + # if src_endpoint is None: + # MSG = 'Endpoint({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[0]))) + # if dst_device is None: + # MSG = 'Device({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[1][0]))) + # if dst_endpoint is None: + # MSG = 'Endpoint({:s}) not found in cache' + # raise Exception(MSG.format(str(endpoint_uuids[1]))) + #except Exception as e: + # MSG = '{:s} in Service({:s})' + # raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e + + #src_dev_name = src_device.name + #src_ep_name = src_endpoint.name + #dst_dev_name = dst_device.name + #dst_ep_name = dst_endpoint.name + + #link_name = '{:s}:{:s}-{:s}=={:s}-{:s}'.format( + # service_name, src_dev_name, src_ep_name, dst_dev_name, dst_ep_name + #) + #te_link = domain_topo.link(link_name) + #te_link.delete() + + #self._object_cache.delete(CachedEntities.SERVICE, service_uuid) + #self._object_cache.delete(CachedEntities.SERVICE, service_name) MSG = 'Logical Link Removed for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) diff --git a/src/simap_connector/service/simap_updater/simap_client/__init__.py b/src/simap_connector/service/simap_updater/simap_client/__init__.py deleted file mode 100644 index 3ccc21c7d..000000000 --- 
a/src/simap_connector/service/simap_updater/simap_client/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/simap_connector/service/simap_updater/simap_client/__main__.py b/src/simap_connector/service/simap_updater/simap_client/__main__.py deleted file mode 100644 index 3aecad42e..000000000 --- a/src/simap_connector/service/simap_updater/simap_client/__main__.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json, logging -from .RestConfClient import RestConfClient -from .SimapClient import SimapClient - -logging.basicConfig(level=logging.INFO) -LOGGER = logging.getLogger(__name__) - -logging.getLogger('RestConfClient').setLevel(logging.WARN) - -def main() -> None: - restconf_client = RestConfClient('127.0.0.1', logger=logging.getLogger('RestConfClient')) - simap_client = SimapClient(restconf_client) - - te_topo = simap_client.network('te') - te_topo.create() - - te_topo.node('ONT1').create(termination_point_ids=['200', '500']) - te_topo.node('ONT2').create(termination_point_ids=['200', '500']) - te_topo.node('OLT' ).create(termination_point_ids=['200', '201', '500', '501']) - te_topo.link('L1').create('ONT1', '500', 'OLT', '200') - te_topo.link('L2').create('ONT2', '500', 'OLT', '201') - - te_topo.node('PE1').create(termination_point_ids=['200', '500', '501']) - te_topo.node('P1' ).create(termination_point_ids=['500', '501']) - te_topo.node('P2' ).create(termination_point_ids=['500', '501']) - te_topo.node('PE2').create(termination_point_ids=['200', '500', '501']) - te_topo.link('L5' ).create('PE1', '500', 'P1', '500') - te_topo.link('L6' ).create('PE1', '501', 'P2', '500') - te_topo.link('L9' ).create('P1', '501', 'PE2', '500') - te_topo.link('L10').create('P2', '501', 'PE2', '501') - - te_topo.node('OA' ).create(termination_point_ids=['200', '500', '501']) - te_topo.node('OTN1').create(termination_point_ids=['500', '501']) - te_topo.node('OTN2').create(termination_point_ids=['500', '501']) - te_topo.node('OE' ).create(termination_point_ids=['200', '500', '501']) - te_topo.link('L7' ).create('OA', '500', 'OTN1', '500') - te_topo.link('L8' ).create('OA', '501', 'OTN2', '500') - te_topo.link('L11' ).create('OTN1', '501', 'OE', '500') - te_topo.link('L12' ).create('OTN2', '501', 'OE', '501') - - te_topo.link('L3').create('OLT', '500', 'PE1', '200') - 
te_topo.link('L4').create('OLT', '501', 'OA', '200') - - te_topo.node('POP1').create(termination_point_ids=['200', '201', '500']) - te_topo.link('L13').create('PE2', '200', 'POP1', '500') - - te_topo.node('POP2').create(termination_point_ids=['200', '201', '500']) - te_topo.link('L14').create('OE', '200', 'POP2', '500') - - - - simap_trans = simap_client.network('simap-trans') - simap_trans.create(supporting_network_ids=['te']) - - site_1 = simap_trans.node('site1') - site_1.create(supporting_node_ids=[('te', 'PE1')]) - site_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE1', '200')]) - site_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE1', '500')]) - site_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE1', '501')]) - - site_2 = simap_trans.node('site2') - site_2.create(supporting_node_ids=[('te', 'PE2')]) - site_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE2', '200')]) - site_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE2', '500')]) - site_2.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE2', '501')]) - - simap_trans.link('Trans-L1').create('site1', '500', 'site2', '500', supporting_link_ids=[('te', 'L5'), ('te', 'L9')]) - - - - - simap_aggnet = simap_client.network('simap-aggnet') - simap_aggnet.create(supporting_network_ids=['te', 'simap-trans']) - - sdp_1 = simap_aggnet.node('sdp1') - sdp_1.create(supporting_node_ids=[('te', 'OLT')]) - sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'OLT', '200')]) - sdp_1.termination_point('201').create(supporting_termination_point_ids=[('te', 'OLT', '201')]) - sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'OLT', '500')]) - sdp_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'OLT', '501')]) - - sdp_2 = simap_aggnet.node('sdp2') - sdp_2.create(supporting_node_ids=[('te', 'POP1')]) - sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) - sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) - sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) - - simap_aggnet.link('AggNet-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L3'), ('simap-trans', 'Trans-L1'), ('te', 'L13')]) - - - - - simap_e2e = simap_client.network('simap-e2e') - simap_e2e.create(supporting_network_ids=['te', 'simap-trans']) - - sdp_1 = simap_e2e.node('sdp1') - sdp_1.create(supporting_node_ids=[('te', 'ONT1')]) - sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'ONT1', '200')]) - sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'ONT1', '500')]) - - sdp_2 = simap_e2e.node('sdp2') - sdp_2.create(supporting_node_ids=[('te', 'POP1')]) - sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) - sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) - sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) - - simap_e2e.link('E2E-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L1'), ('simap-aggnet', 'AggNet-L1')]) - - - print('networks=', json.dumps(simap_client.networks())) - -if __name__ == '__main__': - main() -- GitLab From 05ab026c1c4cdf65a8f25620250aa2008cfac780 Mon 
Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 16:28:30 +0000 Subject: [PATCH 184/367] NBI Component - SSE Telemetry: - Added SIMAP interrogation - Code Styling - Disabled old code --- src/nbi/service/sse_telemetry/SimapClient.py | 242 ++++++++++++++++++ .../sse_telemetry/create_subscription.py | 7 +- .../sse_telemetry/database/Subscription.py | 4 +- src/nbi/service/sse_telemetry/database_tmp.py | 16 -- src/nbi/service/sse_telemetry/topology.py | 136 +++++----- 5 files changed, 320 insertions(+), 85 deletions(-) create mode 100644 src/nbi/service/sse_telemetry/SimapClient.py delete mode 100644 src/nbi/service/sse_telemetry/database_tmp.py diff --git a/src/nbi/service/sse_telemetry/SimapClient.py b/src/nbi/service/sse_telemetry/SimapClient.py new file mode 100644 index 000000000..8f457d452 --- /dev/null +++ b/src/nbi/service/sse_telemetry/SimapClient.py @@ -0,0 +1,242 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from typing import Dict, List, Tuple +from common.tools.rest_conf.client.RestConfClient import RestConfClient + + +class TerminationPoint: + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}/node={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:termination-point={:s}' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str, tp_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tp_id = tp_id + + def create(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network-topology:termination-point'][0] + + def update(self, supporting_termination_point_ids : List[Tuple[str, str, str]] = []) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + tp = {'tp-id': self._tp_id} + stps = [ + {'network-ref': snet_id, 'node-ref': snode_id, 'tp-ref': stp_id} + for snet_id,snode_id,stp_id in supporting_termination_point_ids + ] + if len(stps) > 0: tp['supporting-termination-point'] = stps + node = {'node-id': self._node_id, 'ietf-network-topology:termination-point': [tp]} + network = {'network-id': self._network_id, 'node': [node]} + payload
= {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) + self._restconf_client.delete(endpoint) + +class Node: + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + self._tps : Dict[str, TerminationPoint] = dict() + + def termination_points(self) -> List[Dict]: + tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) + return tps['ietf-network-topology:termination-point'].get('termination-point', list()) + + def termination_point(self, tp_id : str) -> TerminationPoint: + _tp = self._tps.get(tp_id) + if _tp is not None: return _tp + _tp = TerminationPoint(self._restconf_client, self._network_id, self._node_id, tp_id) + return self._tps.setdefault(tp_id, _tp) + + def create( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node : Dict = self._restconf_client.get(endpoint) + return node['ietf-network:node'][0] + + def update( + self, termination_point_ids : List[str] = [], + supporting_node_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + node = {'node-id': self._node_id} + tps = [{'tp-id': tp_id} for tp_id in termination_point_ids] + if len(tps) > 0: node['ietf-network-topology:termination-point'] = tps + sns = [{'network-ref': snet_id, 'node-ref': snode_id} for snet_id,snode_id in supporting_node_ids] + if len(sns) > 0: node['supporting-node'] = sns + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + +class Link: + ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' + ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = 
[{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link : Dict = self._restconf_client.get(endpoint) + return link['ietf-network-topology:link'][0] + + def update( + self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, + supporting_link_ids : List[Tuple[str, str]] = [] + ) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + link = { + 'link-id' : self._link_id, + 'source' : {'source-node': src_node_id, 'source-tp': src_tp_id}, + 'destination': {'dest-node' : dst_node_id, 'dest-tp' : dst_tp_id}, + } + sls = [{'network-ref': snet_id, 'link-ref': slink_id} for snet_id,slink_id in supporting_link_ids] + if len(sls) > 0: link['supporting-link'] = sls + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Link.ENDPOINT_ID.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + +class Network: + ENDPOINT_NO_ID = '/ietf-network:networks' + ENDPOINT_ID = ENDPOINT_NO_ID + '/network={:s}' + + def __init__(self, restconf_client : RestConfClient, network_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._nodes : Dict[str, Node] = dict() + self._links : Dict[str, Link] = dict() + + def nodes(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Node.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('node', list()) + + def links(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Link.ENDPOINT_NO_ID.format(self._network_id)) + return reply['ietf-network:network'][0].get('ietf-network-topology:link', list()) + + def node(self, node_id : str) -> Node: + _node = self._nodes.get(node_id) + if _node is not None: return _node + _node = Node(self._restconf_client, self._network_id, node_id) + return self._nodes.setdefault(node_id, _node) + + def link(self, link_id : str) -> Link: + _link = self._links.get(link_id) + if _link is not None: return _link + _link = Link(self._restconf_client, self._network_id, link_id) + return self._links.setdefault(link_id, _link) + + def create(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + networks : Dict = self._restconf_client.get(endpoint) + return networks['ietf-network:network'][0] + + def update(self, supporting_network_ids : List[str] = []) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + network = {'network-id': self._network_id} + sns = [{'network-ref': sn_id} for sn_id in supporting_network_ids] + if len(sns) > 0: network['supporting-network'] = sns + payload = {'ietf-network:networks': 
{'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = Network.ENDPOINT_ID.format(self._network_id) + self._restconf_client.delete(endpoint) + + +class SimapClient: + def __init__(self, restconf_client : RestConfClient) -> None: + self._restconf_client = restconf_client + self._networks : Dict[str, Network] = dict() + + def networks(self) -> List[Dict]: + reply : Dict = self._restconf_client.get(Network.ENDPOINT_NO_ID) + return reply['ietf-network:networks'].get('network', list()) + + def network(self, network_id : str) -> Network: + _network = self._networks.get(network_id) + if _network is not None: return _network + _network = Network(self._restconf_client, network_id) + return self._networks.setdefault(network_id, _network) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index 8283bad9b..ef022dcc2 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -45,7 +45,6 @@ from .topology import ( get_controller_name, ) -from .database_tmp import SERVICE_ID class SubscriptionId(TypedDict): @@ -73,10 +72,11 @@ class CreateSubscription(Resource): LOGGER.debug('Received subscription request data: {:s}'.format(str(request_data))) # break the request into its abstract components for telemetry subscription + list_db_ids = list_identifiers(db) request_identifier = str( - choice([x for x in range(1000, 10000) if x not in list_identifiers(db)]) + choice([x for x in range(1000, 10000) if x not in list_db_ids]) ) - sub_subs = decompose_subscription(request_data, SERVICE_ID) + sub_subs = decompose_subscription(request_data) # subscribe to each component device_client = DeviceClient() @@ -86,6 +86,7 @@ class CreateSubscription(Resource): 'ietf-yang-push:datastore-xpath-filter' ] + SERVICE_ID = '' device_controller = get_controller_name(xpath, SERVICE_ID, context_client) if device_controller == Controllers.CONTROLLERLESS: LOGGER.warning( diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py index f654dfec6..339390690 100644 --- a/src/nbi/service/sse_telemetry/database/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/Subscription.py @@ -18,7 +18,7 @@ from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction -from typing import Any, List, Optional, TypedDict +from typing import Any, List, Optional, Set, TypedDict from .models.Subscription import SSESubscriptionModel @@ -140,7 +140,7 @@ def get_subscriptions(db_engine: Engine) -> List[SSESubsciprionDict]: return run_transaction(sessionmaker(bind=db_engine), callback) -def list_identifiers(db_engine: Engine) -> List[str]: +def list_identifiers(db_engine: Engine) -> Set[str]: def callback(session: Session) -> set[str]: obj_list: List[SSESubscriptionModel] = session.query(SSESubscriptionModel).all() return {obj.identifier for obj in obj_list} diff --git a/src/nbi/service/sse_telemetry/database_tmp.py b/src/nbi/service/sse_telemetry/database_tmp.py deleted file mode 100644 index fb8778b9b..000000000 --- a/src/nbi/service/sse_telemetry/database_tmp.py +++ /dev/null @@ -1,16 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file 
except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -SERVICE_ID = 'simap1' diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py index d7af66aab..158380f3b 100644 --- a/src/nbi/service/sse_telemetry/topology.py +++ b/src/nbi/service/sse_telemetry/topology.py @@ -13,19 +13,15 @@ # limitations under the License. -import json -import logging -import os +import json, logging, os, re from enum import Enum -from string import octdigits -from typing_extensions import List, TypedDict, Optional -import re - -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient +from typing_extensions import List, TypedDict from common.proto.context_pb2 import Device, DeviceId, Empty from common.tools.object_factory.Device import json_device_id +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.DeviceTypes import DeviceTypeEnum +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) @@ -64,26 +60,26 @@ phy_network = re.compile(r'providerId-\d+-clientId-\d+-topologyId-\d+') LOGGER = logging.getLogger(__name__) -dir_path = os.path.dirname(__file__) +#dir_path = os.path.dirname(__file__) -with open(os.path.join(dir_path, 'Full-Te-Topology-simap1.json'), 'r') as f: - NETWORK_DATA_SIMAP1 = json.load(f) +#with open(os.path.join(dir_path, 'Full-Te-Topology-simap1.json'), 'r') as f: +# NETWORK_DATA_SIMAP1 = json.load(f) -with open(os.path.join(dir_path, 'Full-Te-Topology-simap2.json'), 'r') as f: - NETWORK_DATA_SIMAP2 = json.load(f) +#with open(os.path.join(dir_path, 'Full-Te-Topology-simap2.json'), 'r') as f: +# NETWORK_DATA_SIMAP2 = json.load(f) -def get_network_data(service_id: str) -> dict: - if service_id == 'simap1': - return NETWORK_DATA_SIMAP1 - elif service_id == 'simap2': - return NETWORK_DATA_SIMAP2 - else: - raise ValueError(f'Unsupported service_id: {service_id}. Expected "simap1" or "simap2".') +#def get_network_data(service_id: str) -> dict: +# if service_id == 'simap1': +# return NETWORK_DATA_SIMAP1 +# elif service_id == 'simap2': +# return NETWORK_DATA_SIMAP2 +# else: +# raise ValueError(f'Unsupported service_id: {service_id}. 
Expected "simap1" or "simap2".') def decompose_subscription( - s: SubscribedNotificationsSchema, service_id: str + s : SubscribedNotificationsSchema ) -> List[SubscribedNotificationsSchema]: """ Decomposes a subscription into its components by finding supporting links @@ -92,52 +88,64 @@ def decompose_subscription( input_data = s['ietf-subscribed-notifications:input'] xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] - # Parse the XPath to extract network and link information - # Format: /ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry - parts = xpath_filter.split('/') - network_part = None - link_part = None - - for part in parts: - if part.startswith('network='): - network_part = part[8:] # Remove 'network=' prefix - elif part.startswith('ietf-network-topology:link='): - link_part = part[27:] # Remove 'ietf-network-topology:link=' prefix - - if not network_part or not link_part: - raise ValueError('Invalid XPath filter format') - - # Find the network in the topology data - networks = get_network_data(service_id)['ietf-network:networks']['network'] - target_network = None - - for network in networks: - if network['network-id'] == network_part: - target_network = network - break - - if not target_network: - raise ValueError(f'Network {network_part} not found in topology data') - - # Find the link in the network - links = target_network.get('ietf-network-topology:link', []) - target_link = None - - for link in links: - if link['link-id'] == link_part: - target_link = link - break - - if not target_link: - raise ValueError(f'Link {link_part} not found in network {network_part}') + rest_conf_client = RestConfClient( + '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', + logger=logging.getLogger('RestConfClient') + ) + xpath_data = rest_conf_client.get(xpath_filter) + if not xpath_data: + MSG = 'Resource({:s}) not found in SIMAP Server' + raise Exception(MSG.format(str(xpath_filter))) + +# # Parse the XPath to extract network and link information +# # Format: /ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry +# parts = xpath_filter.split('/') +# network_part = None +# link_part = None + +# for part in parts: +# if part.startswith('network='): +# network_part = part[8:] # Remove 'network=' prefix +# elif part.startswith('ietf-network-topology:link='): +# link_part = part[27:] # Remove 'ietf-network-topology:link=' prefix + +# if not network_part or not link_part: +# raise ValueError('Invalid XPath filter format') + +# # Find the network in the topology data +# networks = get_network_data(service_id)['ietf-network:networks']['network'] +# target_network = None + +# for network in networks: +# if network['network-id'] == network_part: +# target_network = network +# break + +# if not target_network: +# raise ValueError(f'Network {network_part} not found in topology data') + +# # Find the link in the network +# links = target_network.get('ietf-network-topology:link', []) +# target_link = None + +# for link in links: +# if link['link-id'] == link_part: +# target_link = link +# break + +# if not target_link: +# raise ValueError(f'Link {link_part} not found in network {network_part}') # Get supporting links - supporting_links = target_link.get('ietf-network-topology:supporting-link', []) + #supporting_links = target_link.get('ietf-network-topology:supporting-link', []) + supporting_links = xpath_data.get('ietf-network-topology:supporting-link', list()) if not supporting_links: - raise ValueError( - f'No 
supporting links found for link {link_part} in network {network_part}' - ) + #raise ValueError( + # f'No supporting links found for link {link_part} in network {network_part}' + #) + MSG = 'No supporting links found for Resource({:s}, {:s})' + raise Exception(MSG.format(str(xpath_filter), str(xpath_data))) # Create decomposed subscriptions decomposed = [] -- GitLab From adffe11e96c2691115adb3dbf4c0631b484b7fac Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 16:35:07 +0000 Subject: [PATCH 185/367] NBI Component - SSE Telemetry: - Minor fix --- .../sse_telemetry/delete_subscription.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/nbi/service/sse_telemetry/delete_subscription.py b/src/nbi/service/sse_telemetry/delete_subscription.py index e7a030735..d68fef4bc 100644 --- a/src/nbi/service/sse_telemetry/delete_subscription.py +++ b/src/nbi/service/sse_telemetry/delete_subscription.py @@ -37,8 +37,6 @@ from nbi.service.sse_telemetry.topology import ( get_controller_name, ) -from .database_tmp import SERVICE_ID - LOGGER = logging.getLogger(__name__) @@ -46,8 +44,6 @@ LOGGER = logging.getLogger(__name__) class DeleteSubscription(Resource): # @HTTP_AUTH.login_required def post(self): - global SERVICE_ID - db = Engine.get_engine() if db is None: LOGGER.error('Database engine is not initialized') @@ -79,6 +75,7 @@ class DeleteSubscription(Resource): # Unsubscribe from each sub-subscription for sub_sub in sub_subscriptions: # Create unsubscribe request + SERVICE_ID = '' device_controller = get_controller_name(sub_sub['xpath'], SERVICE_ID, context_client) if device_controller == Controllers.CONTROLLERLESS: LOGGER.warning( @@ -105,11 +102,11 @@ class DeleteSubscription(Resource): LOGGER.info('Successfully deleted main subscription: {:s}'.format(main_subscription_id)) - if SERVICE_ID == 'simap1': - SERVICE_ID = 'simap2' - elif SERVICE_ID == 'simap2': - SERVICE_ID = 'simap1' - else: - LOGGER.warning('Unknown service ID, not switching: {:s}'.format(SERVICE_ID)) + #if SERVICE_ID == 'simap1': + # SERVICE_ID = 'simap2' + #elif SERVICE_ID == 'simap2': + # SERVICE_ID = 'simap1' + #else: + # LOGGER.warning('Unknown service ID, not switching: {:s}'.format(SERVICE_ID)) return jsonify({}) -- GitLab From 9d791f9dcfb2125c63782a010392b2a7c03406b3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 16:49:21 +0000 Subject: [PATCH 186/367] SIMAP Connector: - Normalized mock SIMAP names --- .../service/simap_updater/MockSimaps.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/simap_connector/service/simap_updater/MockSimaps.py b/src/simap_connector/service/simap_updater/MockSimaps.py index 6b6fe62ee..cd534fb5c 100644 --- a/src/simap_connector/service/simap_updater/MockSimaps.py +++ b/src/simap_connector/service/simap_updater/MockSimaps.py @@ -21,8 +21,8 @@ LOGGER = logging.getLogger(__name__) def set_simap_e2e_net(simap_client : SimapClient) -> None: - simap = simap_client.network('simap-e2e') - simap.create(supporting_network_ids=['admin', 'simap-aggnet']) + simap = simap_client.network('e2e') + simap.create(supporting_network_ids=['admin', 'agg']) node_a = simap.node('sdp1') node_a.create(supporting_node_ids=[('admin', 'ONT1')]) @@ -39,22 +39,22 @@ def set_simap_e2e_net(simap_client : SimapClient) -> None: link.create( 'sdp1', '500', 'sdp2', '500', supporting_link_ids=[ - ('admin', 'L1'), ('admin', 'L3'), ('simap-aggnet', 'AggNet-L1') + ('admin', 'L1'), ('admin', 'L3'), ('agg', 'AggNet-L1') ] ) def 
delete_simap_e2e_net(simap_client : SimapClient) -> None: - simap = simap_client.network('simap-e2e') - simap.create(supporting_network_ids=['admin', 'simap-aggnet']) + simap = simap_client.network('e2e') + simap.create(supporting_network_ids=['admin', 'agg']) link = simap.link('E2E-L1') link.delete() def set_simap_agg_net(simap_client : SimapClient) -> None: - simap = simap_client.network('simap-aggnet') - simap.create(supporting_network_ids=['admin', 'simap-trans']) + simap = simap_client.network('agg') + simap.create(supporting_network_ids=['admin', 'trans-pkt']) node_a = simap.node('sdp1') node_a.create(supporting_node_ids=[('admin', 'OLT')]) @@ -73,21 +73,21 @@ def set_simap_agg_net(simap_client : SimapClient) -> None: link.create( 'sdp1', '500', 'sdp2', '500', supporting_link_ids=[ - ('simap-trans-pkt', 'Trans-L1'), ('admin', 'L13') + ('trans-pkt', 'Trans-L1'), ('admin', 'L13') ] ) def delete_simap_agg_net(simap_client : SimapClient) -> None: - simap = simap_client.network('simap-aggnet') - simap.create(supporting_network_ids=['admin', 'simap-trans']) + simap = simap_client.network('agg') + simap.create(supporting_network_ids=['admin', 'trans-pkt']) link = simap.link('AggNet-L1') link.delete() def set_simap_trans_pkt(simap_client : SimapClient) -> None: - simap = simap_client.network('simap-trans-pkt') + simap = simap_client.network('trans-pkt') simap.update(supporting_network_ids=['admin']) node_a = simap.node('site1') @@ -112,7 +112,7 @@ def set_simap_trans_pkt(simap_client : SimapClient) -> None: def delete_simap_trans_pkt(simap_client : SimapClient) -> None: - simap = simap_client.network('simap-trans-pkt') + simap = simap_client.network('trans-pkt') simap.update(supporting_network_ids=['admin']) link = simap.link('Trans-L1') @@ -122,9 +122,9 @@ def delete_simap_trans_pkt(simap_client : SimapClient) -> None: def set_mock_simap(simap_client : SimapClient, domain_name : str) -> None: if domain_name == 'trans-pkt': set_simap_trans_pkt(simap_client) - elif domain_name == 'agg-net': + elif domain_name == 'agg': set_simap_agg_net(simap_client) - elif domain_name == 'e2e-net': + elif domain_name == 'e2e': set_simap_e2e_net(simap_client) else: MSG = 'Unsupported Topology({:s}) to set SIMAP' @@ -134,9 +134,9 @@ def set_mock_simap(simap_client : SimapClient, domain_name : str) -> None: def delete_mock_simap(simap_client : SimapClient, domain_name : str) -> None: if domain_name == 'trans-pkt': delete_simap_trans_pkt(simap_client) - elif domain_name == 'agg-net': + elif domain_name == 'agg': delete_simap_agg_net(simap_client) - elif domain_name == 'e2e-net': + elif domain_name == 'e2e': delete_simap_e2e_net(simap_client) else: MSG = 'Unsupported Topology({:s}) to delete SIMAP' -- GitLab From d231d04d5ad6e5872973d12894435bc07b5bf5e1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 16:49:34 +0000 Subject: [PATCH 187/367] ECOC F5GA Telemetry Demo: - Normalized mock SIMAP names --- src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json | 2 +- src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index 2f07dc48b..eace7a399 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -4,7 +4,7 @@ ], "topologies": [ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, 
"topology_uuid": {"uuid": "admin"}}}, - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "agg-net"}}} + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "agg"}}} ], "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json index 4cd5c8c68..117e97e61 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-e2e.json @@ -4,7 +4,7 @@ ], "topologies": [ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "e2e-net"}}} + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "e2e"}}} ], "devices": [ {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", -- GitLab From c56155a0a455a9e1aac53e0fc2dc42b9cbbc08ec Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 16:54:45 +0000 Subject: [PATCH 188/367] ECOC F5GA Telemetry Demo: - Updated subscription payloads - Added teardown scripts --- .../data/telemetry/subscription-slice1.json | 2 +- .../data/telemetry/subscription-slice2.json | 2 +- .../ecoc25-f5ga-telemetry/teardown-slice1.sh | 23 +++++++++++++++++++ .../ecoc25-f5ga-telemetry/teardown-slice2.sh | 23 +++++++++++++++++++ 4 files changed, 48 insertions(+), 2 deletions(-) create mode 100755 src/tests/ecoc25-f5ga-telemetry/teardown-slice1.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/teardown-slice2.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json index 40d167c47..3a2c4b96c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice1.json @@ -1,7 +1,7 @@ { "ietf-subscribed-notifications:input": { "datastore": "operational", - "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e-net/ietf-network-topology:link=slice1:ONT1-200==POP2-200/simap-telemetry", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L1/simap-telemetry", "ietf-yang-push:periodic": { "ietf-yang-push:period": 10 } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json index 2c4478ea7..cd0954ac1 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/telemetry/subscription-slice2.json @@ -1,7 +1,7 @@ { "ietf-subscribed-notifications:input": { "datastore": "operational", - "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e-net/ietf-network-topology:link=slice1:ONT1-200==POP1-200/simap-telemetry", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L2/simap-telemetry", "ietf-yang-push:periodic": { "ietf-yang-push:period": 10 } diff --git a/src/tests/ecoc25-f5ga-telemetry/teardown-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/teardown-slice1.sh new file mode 100755 index 000000000..fe5158f58 --- /dev/null +++ 
b/src/tests/ecoc25-f5ga-telemetry/teardown-slice1.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice1..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=slice1 +echo + + +echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/teardown-slice2.sh b/src/tests/ecoc25-f5ga-telemetry/teardown-slice2.sh new file mode 100755 index 000000000..8bd612b3d --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/teardown-slice2.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice2..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=slice2 +echo + + +echo "Done!" 
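As a quick sanity check for the updated payloads above, the subscription JSON can be posted to the NBI with a few lines of Python. This is only a sketch: the create-subscription route used below is an assumed path, since the exact URL registered for the SSE telemetry CreateSubscription resource is not reproduced in these patches.

    import json
    import requests

    NBI_BASE = 'http://0.0.0.0:80'                         # same NBI host/port used by the teardown scripts above
    CREATE_SUBSCRIPTION_PATH = '/telemetry/subscriptions'  # assumed route for the CreateSubscription resource

    # Load one of the subscription payloads updated in this commit
    # (path relative to src/tests/ecoc25-f5ga-telemetry/)
    with open('data/telemetry/subscription-slice1.json', 'r') as f:
        payload = json.load(f)

    reply = requests.post(NBI_BASE + CREATE_SUBSCRIPTION_PATH, json=payload, timeout=10)
    reply.raise_for_status()
    print(reply.json())  # expected to report the identifier/URI assigned to the new subscription
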
-- GitLab From 81eb9896d10c578bfc93544d5120ee532fb01791 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 17:05:25 +0000 Subject: [PATCH 189/367] SIMAP Connector: - Updated mocks to update instead of create to prevent already-exists errors --- .../service/simap_updater/MockSimaps.py | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/simap_connector/service/simap_updater/MockSimaps.py b/src/simap_connector/service/simap_updater/MockSimaps.py index cd534fb5c..37526d019 100644 --- a/src/simap_connector/service/simap_updater/MockSimaps.py +++ b/src/simap_connector/service/simap_updater/MockSimaps.py @@ -22,21 +22,21 @@ LOGGER = logging.getLogger(__name__) def set_simap_e2e_net(simap_client : SimapClient) -> None: simap = simap_client.network('e2e') - simap.create(supporting_network_ids=['admin', 'agg']) + simap.update(supporting_network_ids=['admin', 'agg']) node_a = simap.node('sdp1') - node_a.create(supporting_node_ids=[('admin', 'ONT1')]) - node_a.termination_point('200').create(supporting_termination_point_ids=[('admin', 'ONT1', '200')]) - node_a.termination_point('500').create(supporting_termination_point_ids=[('admin', 'ONT1', '500')]) + node_a.update(supporting_node_ids=[('admin', 'ONT1')]) + node_a.termination_point('200').update(supporting_termination_point_ids=[('admin', 'ONT1', '200')]) + node_a.termination_point('500').update(supporting_termination_point_ids=[('admin', 'ONT1', '500')]) node_b = simap.node('sdp2') - node_b.create(supporting_node_ids=[('admin', 'POP2')]) - node_b.termination_point('200').create(supporting_termination_point_ids=[('admin', 'POP2', '200')]) - node_b.termination_point('201').create(supporting_termination_point_ids=[('admin', 'POP2', '201')]) - node_b.termination_point('500').create(supporting_termination_point_ids=[('admin', 'POP2', '500')]) + node_b.update(supporting_node_ids=[('admin', 'POP2')]) + node_b.termination_point('200').update(supporting_termination_point_ids=[('admin', 'POP2', '200')]) + node_b.termination_point('201').update(supporting_termination_point_ids=[('admin', 'POP2', '201')]) + node_b.termination_point('500').update(supporting_termination_point_ids=[('admin', 'POP2', '500')]) link = simap.link('E2E-L1') - link.create( + link.update( 'sdp1', '500', 'sdp2', '500', supporting_link_ids=[ ('admin', 'L1'), ('admin', 'L3'), ('agg', 'AggNet-L1') @@ -46,7 +46,7 @@ def set_simap_e2e_net(simap_client : SimapClient) -> None: def delete_simap_e2e_net(simap_client : SimapClient) -> None: simap = simap_client.network('e2e') - simap.create(supporting_network_ids=['admin', 'agg']) + simap.update(supporting_network_ids=['admin', 'agg']) link = simap.link('E2E-L1') link.delete() @@ -54,23 +54,23 @@ def delete_simap_e2e_net(simap_client : SimapClient) -> None: def set_simap_agg_net(simap_client : SimapClient) -> None: simap = simap_client.network('agg') - simap.create(supporting_network_ids=['admin', 'trans-pkt']) + simap.update(supporting_network_ids=['admin', 'trans-pkt']) node_a = simap.node('sdp1') - node_a.create(supporting_node_ids=[('admin', 'OLT')]) - node_a.termination_point('200').create(supporting_termination_point_ids=[('admin', 'OLT', '200')]) - node_a.termination_point('201').create(supporting_termination_point_ids=[('admin', 'OLT', '201')]) - node_a.termination_point('500').create(supporting_termination_point_ids=[('admin', 'OLT', '500')]) - node_a.termination_point('501').create(supporting_termination_point_ids=[('admin', 'OLT', '501')]) + 
node_a.update(supporting_node_ids=[('admin', 'OLT')]) + node_a.termination_point('200').update(supporting_termination_point_ids=[('admin', 'OLT', '200')]) + node_a.termination_point('201').update(supporting_termination_point_ids=[('admin', 'OLT', '201')]) + node_a.termination_point('500').update(supporting_termination_point_ids=[('admin', 'OLT', '500')]) + node_a.termination_point('501').update(supporting_termination_point_ids=[('admin', 'OLT', '501')]) node_b = simap.node('sdp2') - node_b.create(supporting_node_ids=[('admin', 'POP2')]) - node_b.termination_point('200').create(supporting_termination_point_ids=[('admin', 'POP2', '200')]) - node_b.termination_point('201').create(supporting_termination_point_ids=[('admin', 'POP2', '201')]) - node_b.termination_point('500').create(supporting_termination_point_ids=[('admin', 'POP2', '500')]) + node_b.update(supporting_node_ids=[('admin', 'POP2')]) + node_b.termination_point('200').update(supporting_termination_point_ids=[('admin', 'POP2', '200')]) + node_b.termination_point('201').update(supporting_termination_point_ids=[('admin', 'POP2', '201')]) + node_b.termination_point('500').update(supporting_termination_point_ids=[('admin', 'POP2', '500')]) link = simap.link('AggNet-L1') - link.create( + link.update( 'sdp1', '500', 'sdp2', '500', supporting_link_ids=[ ('trans-pkt', 'Trans-L1'), ('admin', 'L13') @@ -80,7 +80,7 @@ def set_simap_agg_net(simap_client : SimapClient) -> None: def delete_simap_agg_net(simap_client : SimapClient) -> None: simap = simap_client.network('agg') - simap.create(supporting_network_ids=['admin', 'trans-pkt']) + simap.update(supporting_network_ids=['admin', 'trans-pkt']) link = simap.link('AggNet-L1') link.delete() -- GitLab From a49f657ba8eb0c75851e3b621361f6659e0ab9c9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 17:08:53 +0000 Subject: [PATCH 190/367] NBI Component - SSE Telemetry: - Fixed xpath handling --- src/nbi/service/sse_telemetry/topology.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py index 158380f3b..4eeeefcaf 100644 --- a/src/nbi/service/sse_telemetry/topology.py +++ b/src/nbi/service/sse_telemetry/topology.py @@ -88,14 +88,16 @@ def decompose_subscription( input_data = s['ietf-subscribed-notifications:input'] xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] + xpath_filter_2 = xpath_filter.replace('/simap-telemetry', '') + rest_conf_client = RestConfClient( '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', logger=logging.getLogger('RestConfClient') ) - xpath_data = rest_conf_client.get(xpath_filter) + xpath_data = rest_conf_client.get(xpath_filter_2) if not xpath_data: - MSG = 'Resource({:s}) not found in SIMAP Server' - raise Exception(MSG.format(str(xpath_filter))) + MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' + raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_2))) # # Parse the XPath to extract network and link information # # Format: /ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry -- GitLab From de5f92a57f1cf20f40ec3a35344ae05e2b94a238 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 17:32:39 +0000 Subject: [PATCH 191/367] Device component - NCE Driver: - Fixed AppFlow RESTCONF POST calls --- src/device/service/drivers/nce/handlers/AppFlowHandler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/src/device/service/drivers/nce/handlers/AppFlowHandler.py b/src/device/service/drivers/nce/handlers/AppFlowHandler.py index 7e8082abe..11670e9fa 100644 --- a/src/device/service/drivers/nce/handlers/AppFlowHandler.py +++ b/src/device/service/drivers/nce/handlers/AppFlowHandler.py @@ -48,7 +48,7 @@ class AppFlowHandler: for qos_profile in qos_profiles: request = {'huawei-nce-app-flow:qos-profiles': {'qos-profile': qos_profile}} LOGGER.info('Creating QoS Profile: {:s}'.format(str(request))) - self._rest_conf_client.post(self._url_qos_profile, json=request) + self._rest_conf_client.post(self._url_qos_profile, body=request) applications = ( data @@ -59,7 +59,7 @@ class AppFlowHandler: for application in applications: request = {'huawei-nce-app-flow:applications': {'application': application}} LOGGER.info('Creating Application: {:s}'.format(str(request))) - self._rest_conf_client.post(self._url_application, json=request) + self._rest_conf_client.post(self._url_application, body=request) app_flows = ( data @@ -69,7 +69,7 @@ class AppFlowHandler: for app_flow in app_flows: request = {'huawei-nce-app-flow:app-flows': {'app-flow': app_flow}} LOGGER.info('Creating App Flow: {:s}'.format(str(request))) - self._rest_conf_client.post(self._url_app_flow, json=request) + self._rest_conf_client.post(self._url_app_flow, body=request) except requests.exceptions.ConnectionError as e: MSG = 'Failed to send POST requests to NCE FAN NBI' -- GitLab From 1f8f79ea786b0b0cd28cf6ef3f9b2dbc1ee19aa1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 17:53:53 +0000 Subject: [PATCH 192/367] Service component - L3NM IETF Slice: - Fixed delete config rules --- .../L3NM_IETFSlice_ServiceHandler.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index 143eb37d2..0b9c3a543 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -183,9 +183,22 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): DeviceId(**json_device_id(src_device_uuid)) ) controller = self.__task_executor.get_device_controller(src_device_obj) + + datastore_delta = DataStoreDelta(self.__service) + running_slice = datastore_delta.running_data + + if len(running_slice) != 1: + MSG = 'Unsupported number of Slices[{:d}]({:s})' + raise Exception(MSG.format(len(running_slice), str(running_slice))) + running_slice = running_slice[0] + + slice_data_model = {'network-slice-services': {'slice-service': [{ + 'id': running_slice['id'], + }]}} + del controller.device_config.config_rules[:] controller.device_config.config_rules.append(ConfigRule(**json_config_rule_delete( - '/service[{:s}]/IETFSlice'.format(service_uuid), {} + '/service[{:s}]/IETFSlice'.format(service_uuid), slice_data_model ))) self.__task_executor.configure_device(controller) results.append(True) -- GitLab From 7d7011826f62c9b3015a580f042cd82d831c7224 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 17:54:11 +0000 Subject: [PATCH 193/367] Service component - L3NM NCE FAN: - Fixed delete config rules --- .../l3nm_ncefan/ConfigRules.py | 20 +++++++- .../l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py | 47 +++++++++++++++++-- 2 files changed, 62 insertions(+), 5 deletions(-) diff --git 
a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py index 0544d8976..7e49f5438 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py @@ -107,14 +107,32 @@ def setup_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: def teardown_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: + app_flow_id : str = json_settings["app_flow_id"] + application_name : str = f"App_Flow_{app_flow_id}" + app_flow_name : str = f"App_Flow_{app_flow_id}" + qos_profile_name : str = json_settings.get("app_flow_qos_profile", "AR_VR_Gaming") + + app_flow = {"name": app_flow_name } + qos_profile = {"name": qos_profile_name} + application = {"name": application_name} + + app_flow_datamodel = { + "huawei-nce-app-flow:app-flows": { + "app-flow": [app_flow], + "qos-profiles": {"qos-profile": [qos_profile]}, + "applications": {"application": [application]}, + } + } + json_config_rules = [ json_config_rule_delete( "/service[{:s}]/AppFlow".format(service_uuid), - {}, + app_flow_datamodel ), json_config_rule_delete( "/service[{:s}]/AppFlow/operation".format(service_uuid), {}, ), ] + return json_config_rules diff --git a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py index cc9f1eafc..0fe6088f9 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py @@ -480,12 +480,51 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): service_uuid = self.__service.service_id.service_uuid.uuid results = [] try: + context_client = ContextClient() + service_config = self.__service.service_config + src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) - src_device_obj = self.__task_executor.get_device( - DeviceId(**json_device_id(src_device_uuid)) - ) + src_device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) controller = self.__task_executor.get_device_controller(src_device_obj) - json_config_rules = teardown_config_rules(service_uuid, {}) + + list_devices = context_client.ListDevices(Empty()) + devices = list_devices.devices + device_name_map = {d.name: d for d in devices} + + running_ietf_slice_cr = get_custom_config_rule( + service_config, RUNNING_RESOURCE_KEY + ) + running_resource_value_dict = json.loads( + running_ietf_slice_cr.custom.resource_value + ) + + slice_service = running_resource_value_dict["network-slice-services"][ + "slice-service" + ][0] + service_name = slice_service["id"] + sdps = slice_service["sdps"]["sdp"] + sdp_ids = [sdp["id"] for sdp in sdps] + for sdp in sdps: + node_id = sdp["node-id"] + device_obj = device_name_map[node_id] + device_controller = self.__task_executor.get_device_controller( + device_obj + ) + if ( + device_controller is None + or controller.name != device_controller.name + ): + continue + src_sdp_idx = sdp_ids.pop(sdp_ids.index(sdp["id"])) + dst_sdp_idx = sdp_ids[0] + break + else: + raise Exception("connection group id not found") + + resource_value_dict = { + "app_flow_id": f"{src_sdp_idx}_{dst_sdp_idx}_{service_name}", + } + json_config_rules = teardown_config_rules(service_uuid, resource_value_dict) if len(json_config_rules) > 0: del controller.device_config.config_rules[:] for json_config_rule in 
json_config_rules: -- GitLab From 12e0fb59c0ccdd9231c603e60fa1c562e508c04c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 18:17:57 +0000 Subject: [PATCH 194/367] NBI Component - SSE Telemetry: - Fixed handling of target link --- src/nbi/service/sse_telemetry/topology.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py index 4eeeefcaf..78e75426c 100644 --- a/src/nbi/service/sse_telemetry/topology.py +++ b/src/nbi/service/sse_telemetry/topology.py @@ -140,9 +140,13 @@ def decompose_subscription( # Get supporting links #supporting_links = target_link.get('ietf-network-topology:supporting-link', []) - supporting_links = xpath_data.get('ietf-network-topology:supporting-link', list()) - if not supporting_links: + links = xpath_data.get('ietf-network-topology:link', list()) + if len(links) == 0: raise Exception('Link({:s}) not found'.format(str(xpath_filter_2))) + if len(links) > 1: raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter_2))) + link = links[0] + supporting_links = link.get('supporting-link', list()) + if len(supporting_links) == 0: #raise ValueError( # f'No supporting links found for link {link_part} in network {network_part}' #) -- GitLab From 30753efd7c3df4fd37dae4bead812fcedc819a16 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 18:18:25 +0000 Subject: [PATCH 195/367] Device component - NCE Driver: - Fixed AppFlow RESTCONF POST calls - Added log messages --- src/device/service/drivers/nce/NCEDriver.py | 4 ++-- .../service/drivers/nce/handlers/AppFlowHandler.py | 12 +++++++++--- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/device/service/drivers/nce/NCEDriver.py b/src/device/service/drivers/nce/NCEDriver.py index 9ff0125f5..31ccad1df 100644 --- a/src/device/service/drivers/nce/NCEDriver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -179,7 +179,7 @@ class NCEDriver(_Driver): if len(resources) == 0: return results with self.__lock: for resource in resources: - LOGGER.info('resource = {:s}'.format(str(resource))) + LOGGER.info('[SetConfig] resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_NCE_APP_FLOW_DATA.match(resource_key): continue @@ -199,7 +199,7 @@ class NCEDriver(_Driver): if len(resources) == 0: return results with self.__lock: for resource in resources: - LOGGER.info('resource = {:s}'.format(str(resource))) + LOGGER.info('[DeleteConfig] resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_NCE_APP_FLOW_DATA.match(resource_key): continue diff --git a/src/device/service/drivers/nce/handlers/AppFlowHandler.py b/src/device/service/drivers/nce/handlers/AppFlowHandler.py index 11670e9fa..d0cc8853e 100644 --- a/src/device/service/drivers/nce/handlers/AppFlowHandler.py +++ b/src/device/service/drivers/nce/handlers/AppFlowHandler.py @@ -47,8 +47,10 @@ class AppFlowHandler: ) for qos_profile in qos_profiles: request = {'huawei-nce-app-flow:qos-profiles': {'qos-profile': qos_profile}} + qos_profile_name = qos_profile['name'] LOGGER.info('Creating QoS Profile: {:s}'.format(str(request))) - self._rest_conf_client.post(self._url_qos_profile, body=request) + url = self._url_qos_profile_item.format(qos_profile_name) + self._rest_conf_client.post(url, body=request) applications = ( data @@ -58,8 +60,10 @@ class AppFlowHandler: ) for application in applications: request = {'huawei-nce-app-flow:applications': 
{'application': application}} + application_name = application['name'] LOGGER.info('Creating Application: {:s}'.format(str(request))) - self._rest_conf_client.post(self._url_application, body=request) + url = self._url_application_item.format(application_name) + self._rest_conf_client.post(url, body=request) app_flows = ( data @@ -68,8 +72,10 @@ class AppFlowHandler: ) for app_flow in app_flows: request = {'huawei-nce-app-flow:app-flows': {'app-flow': app_flow}} + app_flow_name = app_flow['name'] LOGGER.info('Creating App Flow: {:s}'.format(str(request))) - self._rest_conf_client.post(self._url_app_flow, body=request) + url = self._url_app_flow_item.format(app_flow_name) + self._rest_conf_client.post(url, body=request) except requests.exceptions.ConnectionError as e: MSG = 'Failed to send POST requests to NCE FAN NBI' -- GitLab From 4d81dc2e7b24ab66e37ecedd43eb606d8429e239 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 18:18:38 +0000 Subject: [PATCH 196/367] Service component - L3NM IETF Slice: - Added log messages --- src/device/service/drivers/ietf_slice/IetfSliceDriver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py index 456fea2a8..e5f2d3820 100644 --- a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -143,7 +143,7 @@ class IetfSliceDriver(_Driver): with self.__lock: for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) - LOGGER.info('resource = {:s}'.format(str(resource))) + LOGGER.info('[SetConfig] resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_IETF_SLICE_DATA.match(resource_key): continue @@ -168,7 +168,7 @@ class IetfSliceDriver(_Driver): with self.__lock: for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) - LOGGER.info('resource = {:s}'.format(str(resource))) + LOGGER.info('[DeleteConfig] resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_IETF_SLICE_DATA.match(resource_key): continue -- GitLab From 2b94d8250cf097c87f47c2feeab49a39d810adc5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 18:53:26 +0000 Subject: [PATCH 197/367] Device component - NCE Driver: - Fixed AppFlow RESTCONF POST calls --- src/device/service/drivers/nce/handlers/AppFlowHandler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/device/service/drivers/nce/handlers/AppFlowHandler.py b/src/device/service/drivers/nce/handlers/AppFlowHandler.py index d0cc8853e..7e3b75982 100644 --- a/src/device/service/drivers/nce/handlers/AppFlowHandler.py +++ b/src/device/service/drivers/nce/handlers/AppFlowHandler.py @@ -46,7 +46,7 @@ class AppFlowHandler: .get('qos-profile', list()) ) for qos_profile in qos_profiles: - request = {'huawei-nce-app-flow:qos-profiles': {'qos-profile': qos_profile}} + request = {'huawei-nce-app-flow:qos-profiles': {'qos-profile': [qos_profile]}} qos_profile_name = qos_profile['name'] LOGGER.info('Creating QoS Profile: {:s}'.format(str(request))) url = self._url_qos_profile_item.format(qos_profile_name) @@ -59,7 +59,7 @@ class AppFlowHandler: .get('application', list()) ) for application in applications: - request = {'huawei-nce-app-flow:applications': {'application': application}} + request = {'huawei-nce-app-flow:applications': {'application': [application]}} 
application_name = application['name'] LOGGER.info('Creating Application: {:s}'.format(str(request))) url = self._url_application_item.format(application_name) @@ -71,7 +71,7 @@ class AppFlowHandler: .get('app-flow', list()) ) for app_flow in app_flows: - request = {'huawei-nce-app-flow:app-flows': {'app-flow': app_flow}} + request = {'huawei-nce-app-flow:app-flows': {'app-flow': [app_flow]}} app_flow_name = app_flow['name'] LOGGER.info('Creating App Flow: {:s}'.format(str(request))) url = self._url_app_flow_item.format(app_flow_name) -- GitLab From 985d056a7b5f2185acd44da5002965d06a1c6120 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 18:54:01 +0000 Subject: [PATCH 198/367] NBI Component - SSE Telemetry: - Fixed handling of target link and selection of controller --- .../sse_telemetry/create_subscription.py | 44 +++++++++++++++---- src/nbi/service/sse_telemetry/topology.py | 6 +-- 2 files changed, 36 insertions(+), 14 deletions(-) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index ef022dcc2..e70220526 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -29,6 +29,7 @@ from common.proto.monitoring_pb2 import ( SSEMonitoringSubscriptionConfig, SSEMonitoringSubscriptionResponse, ) +from common.tools.rest_conf.client.RestConfClient import RestConfClient from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from nbi.service._tools.Authentication import HTTP_AUTH @@ -71,32 +72,57 @@ class CreateSubscription(Resource): raise UnsupportedMediaType('JSON payload is required') LOGGER.debug('Received subscription request data: {:s}'.format(str(request_data))) + rest_conf_client = RestConfClient( + '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', + logger=logging.getLogger('RestConfClient') + ) + # break the request into its abstract components for telemetry subscription list_db_ids = list_identifiers(db) request_identifier = str( choice([x for x in range(1000, 10000) if x not in list_db_ids]) ) - sub_subs = decompose_subscription(request_data) + sub_subs = decompose_subscription(rest_conf_client, request_data) # subscribe to each component device_client = DeviceClient() context_client = ContextClient() for s in sub_subs: - xpath = s['ietf-subscribed-notifications:input'][ + xpath_filter = s['ietf-subscribed-notifications:input'][ 'ietf-yang-push:datastore-xpath-filter' ] - - SERVICE_ID = '' - device_controller = get_controller_name(xpath, SERVICE_ID, context_client) - if device_controller == Controllers.CONTROLLERLESS: + xpath_filter_prefix = xpath_filter.split('/ietf-network-topology:link')[0] + xpath_network = rest_conf_client.get(xpath_filter_prefix) + if not xpath_network: + MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' + raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_prefix))) + network_id = xpath_network['network-id'] + controller_name_map = { + 'e2e' : 'TFS-E2E', + 'agg' : 'TFS-AGG', + 'trans-pkt': 'TFS-IP', + 'trans-opt': 'NCE-T', + 'access' : 'NCE-FAN', + } + controller_name = controller_name_map.get(network_id) + if controller_name is None: LOGGER.warning( 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) ) continue + + #SERVICE_ID = '' + #device_controller = get_controller_name(xpath, SERVICE_ID, context_client) + #if device_controller == Controllers.CONTROLLERLESS: + # LOGGER.warning( + # 
'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) + # ) + # continue s_req = SSEMonitoringSubscriptionConfig() - s_req.device_id.device_uuid.uuid = device_controller.value + #s_req.device_id.device_uuid.uuid = device_controller.value + s_req.device_id.device_uuid.uuid = controller_name s_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe - s_req.uri = xpath + s_req.uri = xpath_filter s_req.sampling_interval = s['ietf-subscribed-notifications:input'][ 'ietf-yang-push:periodic' ]['ietf-yang-push:period'] @@ -105,7 +131,7 @@ class CreateSubscription(Resource): uuid=str(uuid4()), identifier=r.identifier, uri=r.uri, - xpath=xpath, + xpath=xpath_filter, status=True, main_subscription=False, main_subscription_id=request_identifier, diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py index 78e75426c..174471886 100644 --- a/src/nbi/service/sse_telemetry/topology.py +++ b/src/nbi/service/sse_telemetry/topology.py @@ -79,6 +79,7 @@ LOGGER = logging.getLogger(__name__) def decompose_subscription( + rest_conf_client : RestConfClient, s : SubscribedNotificationsSchema ) -> List[SubscribedNotificationsSchema]: """ @@ -89,11 +90,6 @@ def decompose_subscription( xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] xpath_filter_2 = xpath_filter.replace('/simap-telemetry', '') - - rest_conf_client = RestConfClient( - '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', - logger=logging.getLogger('RestConfClient') - ) xpath_data = rest_conf_client.get(xpath_filter_2) if not xpath_data: MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' -- GitLab From 63e234604c28532ccccef294c1afdf5b2e34ae82 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 7 Sep 2025 18:58:50 +0000 Subject: [PATCH 199/367] Service component - L3NM IETF Slice and NCE FAN: - Added log messages for method DeleteEndpoints() --- .../l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py | 4 ++++ .../l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index 0b9c3a543..d6e2274e6 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -172,6 +172,10 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) -> List[Union[bool, Exception]]: + LOGGER.debug('[DeleteEndpoint] service={:s}'.format(grpc_message_to_json_string(self.__service))) + LOGGER.debug('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.debug('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + chk_type("endpoints", endpoints, list) if len(endpoints) == 0: return [] diff --git a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py index 0fe6088f9..3f1bc6d92 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py @@ -474,6 +474,10 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): endpoints: List[Tuple[str, str, Optional[str]]], connection_uuid: Optional[str] = None, ) 
-> List[Union[bool, Exception]]: + LOGGER.debug('[DeleteEndpoint] service={:s}'.format(grpc_message_to_json_string(self.__service))) + LOGGER.debug('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.debug('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + chk_type("endpoints", endpoints, list) if len(endpoints) == 0: return [] -- GitLab From 22f6bb3d123bb151272eda45cacd4888cf4122fe Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 08:07:06 +0000 Subject: [PATCH 200/367] Service component - NCE FAN: - Corrected Config Rules --- .../service_handlers/l3nm_ncefan/ConfigRules.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py index 7e49f5438..5500b113b 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py @@ -52,7 +52,7 @@ def setup_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: "user-id": app_flow_user_id, "app-name": app_flow_app_name, "max-online-users": app_flow_max_online_users, - "stas": app_flow_stas, + "stas": [app_flow_stas], "qos-profile": qos_profile_name, "service-profile": app_flow_service_profile, "duration": app_flow_duration, @@ -61,19 +61,19 @@ def setup_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: "name": qos_profile_name, "max-latency": max_latency, "max-jitter": max_jitter, - "max-loss": max_loss, + "max-loss": str(max_loss), "upstream": { - "assure-bandwidth": upstream_assure_bw, - "max-bandwidth": upstream_max_bw, + "assure-bandwidth": str(int(upstream_assure_bw)), + "max-bandwidth": str(int(upstream_max_bw)), }, "downstream": { - "assure-bandwidth": downstream_assure_bw, - "max-bandwidth": downstream_max_bw, + "assure-bandwidth": str(int(downstream_assure_bw)), + "max-bandwidth": str(int(downstream_max_bw)), }, } application = { "name": app_flow_app_name, - "app-id": app_id, + "app-id": [app_id], "app-features": { "app-feature": [ { -- GitLab From af56d8de65ecf35738160e17d64d09aa02c479d3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 08:15:48 +0000 Subject: [PATCH 201/367] NBI Component - SSE Telemetry: - Fixed handling of target link and selection of controller --- src/nbi/service/sse_telemetry/create_subscription.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index e70220526..b09e58d44 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -96,7 +96,15 @@ class CreateSubscription(Resource): if not xpath_network: MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_prefix))) - network_id = xpath_network['network-id'] + networks = xpath_network.get('ietf-network:network', list()) + if len(networks) != 1: + MSG = 'Resource({:s} => {:s}) wrong number of entries: {:s}' + raise Exception(MSG.format( + str(xpath_filter), str(xpath_filter_prefix), str(xpath_network) + )) + network = networks[0] + network_id = network['network-id'] + controller_name_map = { 'e2e' : 'TFS-E2E', 'agg' : 'TFS-AGG', -- GitLab From a6d7adb979d6bdc393be563669ca7ffa221fd5f5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 08:25:07 +0000 Subject: [PATCH 
202/367] Device component - NCE Driver: - Add log message --- src/device/service/drivers/nce/NCEDriver.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/device/service/drivers/nce/NCEDriver.py b/src/device/service/drivers/nce/NCEDriver.py index 31ccad1df..e154abffb 100644 --- a/src/device/service/drivers/nce/NCEDriver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -195,6 +195,8 @@ class NCEDriver(_Driver): @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + LOGGER.debug('[DeleteConfig] resources={:s}'.format(str(resources))) + results = [] if len(resources) == 0: return results with self.__lock: -- GitLab From 14b31a7327b8914298b688a563a8bf156abc4c05 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 08:40:58 +0000 Subject: [PATCH 203/367] NBI Component - SSE Telemetry: - Fixed handling of target link and selection of controller --- src/nbi/service/sse_telemetry/create_subscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index b09e58d44..3bf6e5507 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -115,7 +115,7 @@ class CreateSubscription(Resource): controller_name = controller_name_map.get(network_id) if controller_name is None: LOGGER.warning( - 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) + 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath_filter) ) continue -- GitLab From 5888b41ddf86796eef51d2bf6a3e9b7ddc623c61 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 08:55:10 +0000 Subject: [PATCH 204/367] Service component - L3NM IETF Slice and NCE FAN: - Corrected composition of DELETE config rules; was using service uuid instead of service name --- .../L3NM_IETFSlice_ServiceHandler.py | 5 ++-- .../l3nm_ncefan/ConfigRules.py | 24 +++++++++---------- .../l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py index d6e2274e6..54a4a0d30 100644 --- a/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfslice/L3NM_IETFSlice_ServiceHandler.py @@ -195,14 +195,15 @@ class L3NM_IETFSlice_ServiceHandler(_ServiceHandler): MSG = 'Unsupported number of Slices[{:d}]({:s})' raise Exception(MSG.format(len(running_slice), str(running_slice))) running_slice = running_slice[0] + slice_name = running_slice['id'] slice_data_model = {'network-slice-services': {'slice-service': [{ - 'id': running_slice['id'], + 'id': slice_name, }]}} del controller.device_config.config_rules[:] controller.device_config.config_rules.append(ConfigRule(**json_config_rule_delete( - '/service[{:s}]/IETFSlice'.format(service_uuid), slice_data_model + '/service[{:s}]/IETFSlice'.format(slice_name), slice_data_model ))) self.__task_executor.configure_device(controller) results.append(True) diff --git a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py index 5500b113b..312bbbf50 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py +++ 
b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py @@ -20,7 +20,7 @@ from common.tools.object_factory.ConfigRule import ( ) -def setup_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: +def setup_config_rules(service_name: str, json_settings: Dict) -> List[Dict]: operation_type: str = json_settings["operation_type"] app_flow_id: str = json_settings["app_flow_id"] app_flow_user_id: str = json_settings["app_flow_user_id"] @@ -96,17 +96,17 @@ def setup_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: } json_config_rules = [ json_config_rule_set( - "/service[{:s}]/AppFlow".format(service_uuid), app_flow_datamodel - ), - json_config_rule_set( - "/service[{:s}]/AppFlow/operation".format(service_uuid), - {"type": operation_type}, + "/service[{:s}]/AppFlow".format(service_name), app_flow_datamodel ), + #json_config_rule_set( + # "/service[{:s}]/AppFlow/operation".format(service_name), + # {"type": operation_type}, + #), ] return json_config_rules -def teardown_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: +def teardown_config_rules(service_name: str, json_settings: Dict) -> List[Dict]: app_flow_id : str = json_settings["app_flow_id"] application_name : str = f"App_Flow_{app_flow_id}" app_flow_name : str = f"App_Flow_{app_flow_id}" @@ -126,13 +126,13 @@ def teardown_config_rules(service_uuid: str, json_settings: Dict) -> List[Dict]: json_config_rules = [ json_config_rule_delete( - "/service[{:s}]/AppFlow".format(service_uuid), + "/service[{:s}]/AppFlow".format(service_name), app_flow_datamodel ), - json_config_rule_delete( - "/service[{:s}]/AppFlow/operation".format(service_uuid), - {}, - ), + #json_config_rule_delete( + # "/service[{:s}]/AppFlow/operation".format(service_name), + # {}, + #), ] return json_config_rules diff --git a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py index 3f1bc6d92..6fb709b40 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ncefan/L3NM_NCEFAN_ServiceHandler.py @@ -528,7 +528,7 @@ class L3NM_NCEFAN_ServiceHandler(_ServiceHandler): resource_value_dict = { "app_flow_id": f"{src_sdp_idx}_{dst_sdp_idx}_{service_name}", } - json_config_rules = teardown_config_rules(service_uuid, resource_value_dict) + json_config_rules = teardown_config_rules(service_name, resource_value_dict) if len(json_config_rules) > 0: del controller.device_config.config_rules[:] for json_config_rule in json_config_rules: -- GitLab From bcaeb42159bd1606835afe006fe7677e16d89f5d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 09:18:59 +0000 Subject: [PATCH 205/367] Service component - L3NM IETF L3VPN: - Corrected composition of DELETE config rules --- .../l3nm_ietfl3vpn/ConfigRules.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py index c5638fc10..3b537a467 100644 --- a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py @@ -259,10 +259,10 @@ def setup_config_rules( "/service[{:s}]/IETFL3VPN".format(service_uuid), l3_vpn_data_model, ), - json_config_rule_set( - "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), - {"type": operation_type}, - ), + 
#json_config_rule_set( + # "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + # {"type": operation_type}, + #), ] return json_config_rules @@ -274,10 +274,10 @@ def teardown_config_rules(service_uuid: str) -> List[Dict]: "/service[{:s}]/IETFL3VPN".format(service_uuid), {"id": service_uuid}, ), - json_config_rule_delete( - "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), - {}, - ), + #json_config_rule_delete( + # "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + # {}, + #), ] return json_config_rules -- GitLab From 51b7f12d654fab5681d1bdea7b1b0bbe83c59743 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 09:19:25 +0000 Subject: [PATCH 206/367] Device component - IETF L3VPN Driver: - Fixed DeleteConfig method --- src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 379655a0b..0d616de5a 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -201,9 +201,10 @@ class IetfL3VpnDriver(_Driver): if not RE_IETF_L3VPN_DATA.match(resource_key): continue try: resource_value = json.loads(resource_value) - service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ - 'vpn-services' - ]['vpn-service'][0]['vpn-id'] + #service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ + # 'vpn-services' + #]['vpn-service'][0]['vpn-id'] + service_uuid = resource_value['id'] self.tac.delete_connectivity_service(service_uuid) results.append((resource_key, True)) except Exception as e: -- GitLab From 4ac6445a0eede1159f807b698afbdcded402cbd2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 09:40:07 +0000 Subject: [PATCH 207/367] NBI Component - SSE Telemetry: - Fixed handling of sub-subscription generation --- src/nbi/service/sse_telemetry/create_subscription.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index 3bf6e5507..a0f061041 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -126,14 +126,17 @@ class CreateSubscription(Resource): # 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) # ) # continue + + sampling_interval = s['ietf-subscribed-notifications:input'][ + 'ietf-yang-push:periodic' + ]['ietf-yang-push:period'] + s_req = SSEMonitoringSubscriptionConfig() #s_req.device_id.device_uuid.uuid = device_controller.value s_req.device_id.device_uuid.uuid = controller_name s_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe s_req.uri = xpath_filter - s_req.sampling_interval = s['ietf-subscribed-notifications:input'][ - 'ietf-yang-push:periodic' - ]['ietf-yang-push:period'] + s_req.sampling_interval = str(sampling_interval) r: SSEMonitoringSubscriptionResponse = device_client.SSETelemetrySubscribe(s_req) s = SSESubsciprionDict( uuid=str(uuid4()), -- GitLab From 81418b31b5f81e6f2ab07edb4f74f79eac0bb3ec Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:01:22 +0000 Subject: [PATCH 208/367] Device component: - Added missing method "SSETelemetrySubscribe" in client --- src/device/client/DeviceClient.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/device/client/DeviceClient.py 
b/src/device/client/DeviceClient.py index fe3ac0f02..69d697cb1 100644 --- a/src/device/client/DeviceClient.py +++ b/src/device/client/DeviceClient.py @@ -21,6 +21,7 @@ from common.proto.context_pb2 import ( ) from common.proto.device_pb2 import MonitoringSettings from common.proto.device_pb2_grpc import DeviceServiceStub +from common.proto.monitoring_pb2 import SSEMonitoringSubscriptionConfig, SSEMonitoringSubscriptionResponse from common.proto.optical_device_pb2_grpc import OpenConfigServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -105,3 +106,10 @@ class DeviceClient: response = self.openconfig_stub.DisableOpticalDevice(request) LOGGER.debug('DisableOpticalDevice result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def SSETelemetrySubscribe(self, request : SSEMonitoringSubscriptionConfig) -> SSEMonitoringSubscriptionResponse: + LOGGER.debug('SSETelemetrySubscribe request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.SSETelemetrySubscribe(request) + LOGGER.debug('SSETelemetrySubscribe result: {:s}'.format(grpc_message_to_json_string(response))) + return response -- GitLab From 9b65ef5b3fb071a3a0aced6ed8688b969cbb1173 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:01:54 +0000 Subject: [PATCH 209/367] SIMAP Connector: - Disabled unneeded condition in service remove --- .../service/simap_updater/SimapUpdater.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 5731a9fa5..10d16ba3a 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -543,13 +543,13 @@ class EventDispatcher(BaseEventDispatcher): except: # pylint: disable=bare-except pass - topology_uuid, endpoint_uuids = get_service_endpoint(service) - if topology_uuid is None: - MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' - str_service_event = grpc_message_to_json_string(service_event) - str_service = grpc_message_to_json_string(service) - LOGGER.warning(MSG.format(str_service_event, str_service)) - return + #topology_uuid, endpoint_uuids = get_service_endpoint(service) + #if topology_uuid is None: + # MSG = 'ServiceEvent({:s}) skipped, no endpoint_ids to identify topology: {:s}' + # str_service_event = grpc_message_to_json_string(service_event) + # str_service = grpc_message_to_json_string(service) + # LOGGER.warning(MSG.format(str_service_event, str_service)) + # return topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) topology_names = {t.name for t in topologies} -- GitLab From b972041cb9851799966736216e72657d40daa5e5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:23:57 +0000 Subject: [PATCH 210/367] Device component: - Added logging to RPC method "SSETelemetrySubscribe" --- src/device/service/DeviceServiceServicerImpl.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index b6a10d4be..d599a5467 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -404,7 +404,10 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): finally: 
self.mutex_queues.signal_done(device_uuid) - def SSETelemetrySubscribe(self, request: SSEMonitoringSubscriptionConfig, context : grpc.ServicerContext) -> SSEMonitoringSubscriptionResponse: + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def SSETelemetrySubscribe( + self, request: SSEMonitoringSubscriptionConfig, context : grpc.ServicerContext + ) -> SSEMonitoringSubscriptionResponse: device_id = request.device_id.device_uuid.uuid config_type = request.config_type context_client = ContextClient() @@ -414,6 +417,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): if device is None: raise NotFoundException('Device', device_id, extra_details='loading in ConfigureDevice') driver : _Driver = get_driver(self.driver_instance_cache, device) + if config_type == SSEMonitoringSubscriptionConfig.Subscribe: r = driver.SubscribeState([(request.uri, 0, float(request.sampling_interval))]) if len(r) != 1: @@ -422,6 +426,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): ) sub_conf: dict = r[0] return SSEMonitoringSubscriptionResponse(identifier=sub_conf['identifier'], uri=sub_conf['uri']) + if config_type == SSEMonitoringSubscriptionConfig.Unsubscribe: r = driver.UnsubscribeState([(request.identifier, 0, 0)]) if len(r) != 1: -- GitLab From bade52b9244c924c533c1cf9abe589252b18eb34 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:24:27 +0000 Subject: [PATCH 211/367] Device component - NCE Driver: - Add log messages in subscriptions - Code cleanup --- .../drivers/nce/handlers/SubscriptionHandler.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py index 78c553b65..00e45cdca 100644 --- a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py @@ -51,13 +51,6 @@ class SubscriptionHandler: def __init__(self, rest_conf_client : RestConfClient) -> None: self._rest_conf_client = rest_conf_client - self._url_qos_profile = '/huawei-nce-app-flow:qos-profiles' - self._url_qos_profile_item = self._url_qos_profile + '/qos-profile={:s}' - - self._url_app_flow = '/huawei-nce-app-flow:app-flows' - self._url_app_flow_item = self._url_app_flow + '/app-flow={:s}' - - def subscribe( self, subscription_data : SubscribedNotificationsSchema ) -> SubscriptionId: @@ -66,12 +59,13 @@ class SubscriptionHandler: try: url = '/subscriptions:establish-subscription' LOGGER.debug('Subscribing to telemetry: {:s}'.format(str(subscription_data))) - return self._rest_conf_client.rpc(url, json=subscription_data) + reply = self._rest_conf_client.rpc(url, json=subscription_data) + LOGGER.debug('Subscription reply: {:s}'.format(str(reply))) + return reply except requests.exceptions.ConnectionError as e: MSG = 'Failed to send RPC request' raise Exception(MSG) from e - def unsubscribe( self, unsubscription_data : UnsubscribedNotificationsSchema ) -> SubscriptionId: @@ -80,7 +74,9 @@ class SubscriptionHandler: try: url = '/subscriptions:delete-subscription' LOGGER.debug('Unsubscribing from telemetry: {:s}'.format(str(unsubscription_data))) - return self._rest_conf_client.rpc(url, json=unsubscription_data) + reply = self._rest_conf_client.rpc(url, json=unsubscription_data) + LOGGER.debug('Unsubscription reply: {:s}'.format(str(reply))) + return reply except requests.exceptions.ConnectionError as e: MSG = 'Failed to send RPC request' raise Exception(MSG) from e -- GitLab From 
4016530f757aa59fbc05f802e29102205593250d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:25:48 +0000 Subject: [PATCH 212/367] Device component - IETF Slice Driver: - Implemented subscriptions - Code cleanup --- .../drivers/ietf_slice/IetfSliceDriver.py | 50 +++++++++-- .../handlers/SubscriptionHandler.py | 82 +++++++++++++++++++ .../drivers/ietf_slice/handlers/__init__.py | 13 +++ 3 files changed, 136 insertions(+), 9 deletions(-) create mode 100644 src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py create mode 100644 src/device/service/drivers/ietf_slice/handlers/__init__.py diff --git a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py index e5f2d3820..ddbc4dfdc 100644 --- a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -13,12 +13,16 @@ # limitations under the License. -import json, logging, re, threading -from typing import Any, Iterator, List, Optional, Tuple, Union +import copy, json, logging, re, threading +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_string, chk_type from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology +from .handlers.SubscriptionHandler import ( + SubscribedNotificationsSchema, SubscriptionHandler, UnsubscribedNotificationsSchema +) from .TfsApiClient import TfsApiClient @@ -54,6 +58,13 @@ class IetfSliceDriver(_Driver): password=password, timeout=timeout ) + restconf_settings = copy.deepcopy(settings) + restconf_settings.pop('base_url', None) + restconf_settings.pop('import_topology', None) + restconf_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient') + self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) + self._handler_subscription = SubscriptionHandler(self._rest_conf_client) + # Options are: # disabled --> just import endpoints as usual # devices --> imports sub-devices but not links connecting them. 
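For context, the SubscriptionHandler wired into the constructor above talks ietf-subscribed-notifications / ietf-yang-push over RESTCONF. A minimal sketch of the establish-subscription body it composes, with purely illustrative xpath and period values (they are not taken from this patch), looks like:

    # Illustrative payload only; the keys follow the SubscribedNotificationsSchema
    # TypedDict added in handlers/SubscriptionHandler.py, the values are made up.
    subscription_body = {
        'ietf-subscribed-notifications:input': {
            'datastore': 'operational',
            'ietf-yang-push:datastore-xpath-filter':
                '/ietf-network-slice-service:network-slice-services',
            'ietf-yang-push:periodic': {'ietf-yang-push:period': '30'},
        }
    }
    # The handler forwards this body to the '/subscriptions:establish-subscription'
    # RESTCONF operation and returns the controller's reply (identifier + uri).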
@@ -188,21 +199,42 @@ class IetfSliceDriver(_Driver): @metered_subclass_method(METRICS_POOL) def SubscribeState( self, subscriptions : List[Tuple[str, float, float]] - ) -> List[Union[bool, Exception]]: - # TODO: does not support monitoring by now - return [False for _ in subscriptions] + ) -> List[Union[bool, Dict[str, Any], Exception]]: + if len(subscriptions) != 1: + raise ValueError('IETF Slice Driver supports only one subscription at a time') + s = subscriptions[0] + uri = s[0] + #sampling_duration = s[1] + sampling_interval = s[2] + s_data : SubscribedNotificationsSchema = { + 'ietf-subscribed-notifications:input': { + 'datastore': 'operational', + 'ietf-yang-push:datastore-xpath-filter': uri, + 'ietf-yang-push:periodic': {'ietf-yang-push:period': str(sampling_interval)}, + } + } + s_id = self._handler_subscription.subscribe(s_data) + return [s_id] @metered_subclass_method(METRICS_POOL) def UnsubscribeState( self, subscriptions : List[Tuple[str, float, float]] - ) -> List[Union[bool, Exception]]: - # TODO: does not support monitoring by now - return [False for _ in subscriptions] + ) -> List[Union[bool, Dict[str, Any], Exception]]: + if len(subscriptions) != 1: + raise ValueError('IETF Slice Driver supports only one subscription at a time') + s = subscriptions[0] + identifier = s[0] + s_data : UnsubscribedNotificationsSchema = { + 'delete-subscription': { + 'identifier': identifier, + } + } + self._handler_subscription.unsubscribe(s_data) + return [True] def GetState( self, blocking=False, terminate : Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: - # TODO: does not support monitoring by now return [] diff --git a/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py b/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py new file mode 100644 index 000000000..00e45cdca --- /dev/null +++ b/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py @@ -0,0 +1,82 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, requests +from typing_extensions import TypedDict +from common.tools.rest_conf.client.RestConfClient import RestConfClient + + +LOGGER = logging.getLogger(__name__) + + +Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) + +Input = TypedDict( + 'Input', + { + 'datastore': str, + 'ietf-yang-push:datastore-xpath-filter': str, + 'ietf-yang-push:periodic': Periodic, + }, +) + +SubscribedNotificationsSchema = TypedDict( + 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} +) + +SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) + +UnsubscribedNotificationsSchema = TypedDict( + 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} +) + + +class SubscriptionId(TypedDict): + identifier: str + uri: str + + +class SubscriptionHandler: + def __init__(self, rest_conf_client : RestConfClient) -> None: + self._rest_conf_client = rest_conf_client + + def subscribe( + self, subscription_data : SubscribedNotificationsSchema + ) -> SubscriptionId: + MSG = '[subscribe] subscription_data={:s}' + LOGGER.debug(MSG.format(str(subscription_data))) + try: + url = '/subscriptions:establish-subscription' + LOGGER.debug('Subscribing to telemetry: {:s}'.format(str(subscription_data))) + reply = self._rest_conf_client.rpc(url, json=subscription_data) + LOGGER.debug('Subscription reply: {:s}'.format(str(reply))) + return reply + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send RPC request' + raise Exception(MSG) from e + + def unsubscribe( + self, unsubscription_data : UnsubscribedNotificationsSchema + ) -> SubscriptionId: + MSG = '[unsubscribe] unsubscription_data={:s}' + LOGGER.debug(MSG.format(str(unsubscription_data))) + try: + url = '/subscriptions:delete-subscription' + LOGGER.debug('Unsubscribing from telemetry: {:s}'.format(str(unsubscription_data))) + reply = self._rest_conf_client.rpc(url, json=unsubscription_data) + LOGGER.debug('Unsubscription reply: {:s}'.format(str(reply))) + return reply + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send RPC request' + raise Exception(MSG) from e diff --git a/src/device/service/drivers/ietf_slice/handlers/__init__.py b/src/device/service/drivers/ietf_slice/handlers/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/device/service/drivers/ietf_slice/handlers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
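Taken together with the driver changes earlier in this patch, the new SubscribeState / UnsubscribeState pair keeps the one-subscription-at-a-time contract enforced above. A rough usage sketch, assuming an already-connected IetfSliceDriver instance named 'driver' and illustrative xpath/interval values (all assumptions, not part of the patch):

    # Hedged sketch: 'driver', the xpath and the 30-second interval are assumed.
    xpath = '/ietf-network-slice-service:network-slice-services'
    replies = driver.SubscribeState([(xpath, 0.0, 30.0)])  # (uri, sampling_duration, sampling_interval)
    sub = replies[0]  # expected to carry the controller-assigned 'identifier' and 'uri'

    # ... later, tear the subscription down, passing the identifier in the first slot:
    driver.UnsubscribeState([(sub['identifier'], 0.0, 0.0)])

The identifier/uri pair returned here is what DeviceServiceServicerImpl.SSETelemetrySubscribe() repackages into the SSEMonitoringSubscriptionResponse sent back to the NBI.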
-- GitLab From 59b475970a96d9bdeb6f2b472a5a164207805b13 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:36:24 +0000 Subject: [PATCH 213/367] Manifests: - Added well-known to NGINX ingress redirector --- manifests/nginx_ingress_http.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index e45ca65f0..6d7badaa2 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -57,6 +57,13 @@ spec: name: webuiservice port: number: 3000 + - path: /()(.well-known/.*) + pathType: Prefix + backend: + service: + name: nbiservice + port: + number: 8080 - path: /()(restconf/.*) pathType: Prefix backend: -- GitLab From b2ae2f7f97301cdfb1158f767dd1d93900d502d8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 10:56:57 +0000 Subject: [PATCH 214/367] NBI Component - WellKnown-HostMeta Connector: - Added "links" to well-known/host-meta --- src/nbi/service/_tools/HttpStatusCodes.py | 21 ++++--- src/nbi/service/well_known_meta/Resources.py | 60 +++++++++++++++----- 2 files changed, 58 insertions(+), 23 deletions(-) diff --git a/src/nbi/service/_tools/HttpStatusCodes.py b/src/nbi/service/_tools/HttpStatusCodes.py index 19c56d7fb..cb7fc76a6 100644 --- a/src/nbi/service/_tools/HttpStatusCodes.py +++ b/src/nbi/service/_tools/HttpStatusCodes.py @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -HTTP_OK = 200 -HTTP_CREATED = 201 -HTTP_ACCEPTED = 202 -HTTP_NOCONTENT = 204 -HTTP_BADREQUEST = 400 -HTTP_NOTFOUND = 404 -HTTP_UNSUPMEDIATYPE = 415 -HTTP_SERVERERROR = 500 -HTTP_GATEWAYTIMEOUT = 504 +HTTP_OK = 200 +HTTP_CREATED = 201 +HTTP_ACCEPTED = 202 +HTTP_NOCONTENT = 204 +HTTP_BADREQUEST = 400 +HTTP_NOTFOUND = 404 +HTTP_NOT_ACCEPTABLE = 406 +HTTP_CONFLICT = 409 +HTTP_UNSUPMEDIATYPE = 415 +HTTP_SERVERERROR = 500 +HTTP_NOT_IMPLEMENTED = 501 +HTTP_GATEWAYTIMEOUT = 504 diff --git a/src/nbi/service/well_known_meta/Resources.py b/src/nbi/service/well_known_meta/Resources.py index 86004c165..e60f4230a 100644 --- a/src/nbi/service/well_known_meta/Resources.py +++ b/src/nbi/service/well_known_meta/Resources.py @@ -12,24 +12,56 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ # RESTCONF .well-known endpoint (RFC 8040) -from flask import jsonify +from flask import abort, jsonify, make_response, request from flask_restful import Resource +from xml.sax.saxutils import escape +import xml.etree.ElementTree as ET +from .._tools.HttpStatusCodes import HTTP_NOT_ACCEPTABLE + +XRD_NS = 'http://docs.oasis-open.org/ns/xri/xrd-1.0' +ET.register_namespace('', XRD_NS) + +RESTCONF_PREFIX = '/restconf' class WellKnownHostMeta(Resource): def get(self): - response = { - 'restconf': { - 'capabilities': [ - 'urn:ietf:params:restconf:capability:defaults:1.0', - 'urn:ietf:params:restconf:capability:depth:1.0', - 'urn:ietf:params:restconf:capability:with-defaults:1.0' - ], - 'media-types': [ - 'application/yang-data+json', - 'application/yang-data+xml' + best = request.accept_mimetypes.best_match([ + 'application/xrd+xml', + 'application/json', + ], default='application/xrd+xml') + + if best == 'application/xrd+xml': + xrd = ET.Element('{{{:s}}}XRD'.format(str(XRD_NS))) + ET.SubElement(xrd, '{{{:s}}}Link'.format(str(XRD_NS)), attrib={ + 'rel': 'restconf', 'href': RESTCONF_PREFIX + }) + xml_string = ET.tostring(xrd, encoding='utf-8', xml_declaration=True).decode() + response = make_response(str(xml_string)) + response.status_code = 200 + response.content_type = best + return response + elif best == 'application/json': + response = jsonify({ + 'restconf': { + 'capabilities': [ + 'urn:ietf:params:restconf:capability:defaults:1.0', + 'urn:ietf:params:restconf:capability:depth:1.0', + 'urn:ietf:params:restconf:capability:with-defaults:1.0' + ], + 'media-types': [ + 'application/yang-data+json', + 'application/yang-data+xml' + ] + }, + 'links': [ + {'rel': 'restconf', 'href': RESTCONF_PREFIX} ] - } - } - return jsonify(response) + }) + response.status_code = 200 + response.content_type = best + return response + else: + abort(HTTP_NOT_ACCEPTABLE) -- GitLab From 6ca22402a9b8abab654092121df14f8cd4f44882 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 11:45:39 +0000 Subject: [PATCH 215/367] Device component - IETF Slice / NCE FAN Driver: - Bug fix in rpc() method call for (un)subscriptions --- .../drivers/ietf_slice/handlers/SubscriptionHandler.py | 4 ++-- .../service/drivers/nce/handlers/SubscriptionHandler.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py b/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py index 00e45cdca..83d62880d 100644 --- a/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py @@ -59,7 +59,7 @@ class SubscriptionHandler: try: url = '/subscriptions:establish-subscription' LOGGER.debug('Subscribing to telemetry: {:s}'.format(str(subscription_data))) - reply = self._rest_conf_client.rpc(url, json=subscription_data) + reply = self._rest_conf_client.rpc(url, body=subscription_data) LOGGER.debug('Subscription reply: {:s}'.format(str(reply))) return reply except requests.exceptions.ConnectionError as e: @@ -74,7 +74,7 @@ class SubscriptionHandler: try: url = '/subscriptions:delete-subscription' LOGGER.debug('Unsubscribing from telemetry: {:s}'.format(str(unsubscription_data))) - reply = self._rest_conf_client.rpc(url, json=unsubscription_data) + reply = self._rest_conf_client.rpc(url, body=unsubscription_data) LOGGER.debug('Unsubscription reply: {:s}'.format(str(reply))) return reply except requests.exceptions.ConnectionError as e: diff --git 
a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py index 00e45cdca..83d62880d 100644 --- a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py @@ -59,7 +59,7 @@ class SubscriptionHandler: try: url = '/subscriptions:establish-subscription' LOGGER.debug('Subscribing to telemetry: {:s}'.format(str(subscription_data))) - reply = self._rest_conf_client.rpc(url, json=subscription_data) + reply = self._rest_conf_client.rpc(url, body=subscription_data) LOGGER.debug('Subscription reply: {:s}'.format(str(reply))) return reply except requests.exceptions.ConnectionError as e: @@ -74,7 +74,7 @@ class SubscriptionHandler: try: url = '/subscriptions:delete-subscription' LOGGER.debug('Unsubscribing from telemetry: {:s}'.format(str(unsubscription_data))) - reply = self._rest_conf_client.rpc(url, json=unsubscription_data) + reply = self._rest_conf_client.rpc(url, body=unsubscription_data) LOGGER.debug('Unsubscription reply: {:s}'.format(str(reply))) return reply except requests.exceptions.ConnectionError as e: -- GitLab From fc79d0d3b72be3a46c1e97ec93c48ea550409f3c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 12:00:27 +0000 Subject: [PATCH 216/367] Device component - IETF L3VPN Driver: - Implemented subscriptions - Code cleanup --- .../drivers/ietf_l3vpn/IetfL3VpnDriver.py | 65 ++++++++++++--- .../handlers/SubscriptionHandler.py | 82 +++++++++++++++++++ .../drivers/ietf_l3vpn/handlers/__init__.py | 13 +++ 3 files changed, 150 insertions(+), 10 deletions(-) create mode 100644 src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py create mode 100644 src/device/service/drivers/ietf_l3vpn/handlers/__init__.py diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 0d616de5a..8181602d3 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -14,12 +14,16 @@ import anytree, json, logging, re, threading -from typing import Any, Iterator, List, Optional, Tuple, Union +from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.type_checkers.Checkers import chk_length, chk_string, chk_type from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology +from .handlers.SubscriptionHandler import ( + SubscribedNotificationsSchema, SubscriptionHandler, UnsubscribedNotificationsSchema +) from .Constants import SPECIAL_RESOURCE_MAPPINGS from .TfsApiClient import TfsApiClient from .Tools import compose_resource_endpoint @@ -37,6 +41,7 @@ ALL_RESOURCE_KEYS = [ RE_IETF_L3VPN_DATA = re.compile(r'^\/service\[[^\]]+\]\/IETFL3VPN$') RE_IETF_L3VPN_OPERATION = re.compile(r'^\/service\[[^\]]+\]\/IETFL3VPN\/operation$') + DRIVER_NAME = 'ietf_l3vpn' METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) @@ -57,6 +62,13 @@ class IetfL3VpnDriver(_Driver): password=password, timeout=timeout ) + restconf_settings = copy.deepcopy(settings) + 
restconf_settings.pop('base_url', None) + restconf_settings.pop('import_topology', None) + restconf_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient') + self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) + self._handler_subscription = SubscriptionHandler(self._rest_conf_client) + # Options are: # disabled --> just import endpoints as usual # devices --> imports sub-devices but not links connecting them. @@ -119,16 +131,19 @@ class IetfL3VpnDriver(_Driver): if checked: self.__started.set() return checked + def Disconnect(self) -> bool: with self.__lock: self.__terminate.set() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return [] + @metered_subclass_method(METRICS_POOL) def GetConfig( self, resource_keys : List[str] = [] @@ -165,6 +180,7 @@ class IetfL3VpnDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def SetConfig( self, resources : List[Tuple[str, Any]] @@ -174,9 +190,11 @@ class IetfL3VpnDriver(_Driver): with self.__lock: for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) - LOGGER.info('resource = {:s}'.format(str(resource))) + LOGGER.info('[SetConfig] resource = {:s}'.format(str(resource))) resource_key, resource_value = resource + if not RE_IETF_L3VPN_DATA.match(resource_key): continue + try: resource_value = json.loads(resource_value) self.tac.create_connectivity_service(resource_value) @@ -187,6 +205,7 @@ class IetfL3VpnDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig( self, resources : List[Tuple[str, Any]] @@ -196,9 +215,11 @@ class IetfL3VpnDriver(_Driver): with self.__lock: for i, resource in enumerate(resources): str_resource_name = 'resource_key[#{:d}]'.format(i) - LOGGER.info('resource = {:s}'.format(str(resource))) + LOGGER.info('[DeleteConfig] resource = {:s}'.format(str(resource))) resource_key, resource_value = resource + if not RE_IETF_L3VPN_DATA.match(resource_key): continue + try: resource_value = json.loads(resource_value) #service_uuid = resource_value['ietf-l3vpn-svc:l3vpn-svc'][ @@ -213,22 +234,46 @@ class IetfL3VpnDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState( self, subscriptions : List[Tuple[str, float, float]] - ) -> List[Union[bool, Exception]]: - # TODO: does not support monitoring by now - return [False for _ in subscriptions] + ) -> List[Union[bool, Dict[str, Any], Exception]]: + if len(subscriptions) != 1: + raise ValueError('IETF L3VPN Driver supports only one subscription at a time') + s = subscriptions[0] + uri = s[0] + #sampling_duration = s[1] + sampling_interval = s[2] + s_data : SubscribedNotificationsSchema = { + 'ietf-subscribed-notifications:input': { + 'datastore': 'operational', + 'ietf-yang-push:datastore-xpath-filter': uri, + 'ietf-yang-push:periodic': {'ietf-yang-push:period': str(sampling_interval)}, + } + } + s_id = self._handler_subscription.subscribe(s_data) + return [s_id] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState( self, subscriptions : List[Tuple[str, float, float]] - ) -> List[Union[bool, Exception]]: - # TODO: does not support monitoring by now - return [False for _ in subscriptions] + ) -> List[Union[bool, Dict[str, Any], Exception]]: + if len(subscriptions) != 1: + raise ValueError('IETF L3VPN Driver supports only one subscription at a 
time') + s = subscriptions[0] + identifier = s[0] + s_data : UnsubscribedNotificationsSchema = { + 'delete-subscription': { + 'identifier': identifier, + } + } + self._handler_subscription.unsubscribe(s_data) + return [True] + def GetState( self, blocking=False, terminate : Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: - # TODO: does not support monitoring by now return [] diff --git a/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py b/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py new file mode 100644 index 000000000..83d62880d --- /dev/null +++ b/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py @@ -0,0 +1,82 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, requests +from typing_extensions import TypedDict +from common.tools.rest_conf.client.RestConfClient import RestConfClient + + +LOGGER = logging.getLogger(__name__) + + +Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) + +Input = TypedDict( + 'Input', + { + 'datastore': str, + 'ietf-yang-push:datastore-xpath-filter': str, + 'ietf-yang-push:periodic': Periodic, + }, +) + +SubscribedNotificationsSchema = TypedDict( + 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} +) + +SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) + +UnsubscribedNotificationsSchema = TypedDict( + 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} +) + + +class SubscriptionId(TypedDict): + identifier: str + uri: str + + +class SubscriptionHandler: + def __init__(self, rest_conf_client : RestConfClient) -> None: + self._rest_conf_client = rest_conf_client + + def subscribe( + self, subscription_data : SubscribedNotificationsSchema + ) -> SubscriptionId: + MSG = '[subscribe] subscription_data={:s}' + LOGGER.debug(MSG.format(str(subscription_data))) + try: + url = '/subscriptions:establish-subscription' + LOGGER.debug('Subscribing to telemetry: {:s}'.format(str(subscription_data))) + reply = self._rest_conf_client.rpc(url, body=subscription_data) + LOGGER.debug('Subscription reply: {:s}'.format(str(reply))) + return reply + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send RPC request' + raise Exception(MSG) from e + + def unsubscribe( + self, unsubscription_data : UnsubscribedNotificationsSchema + ) -> SubscriptionId: + MSG = '[unsubscribe] unsubscription_data={:s}' + LOGGER.debug(MSG.format(str(unsubscription_data))) + try: + url = '/subscriptions:delete-subscription' + LOGGER.debug('Unsubscribing from telemetry: {:s}'.format(str(unsubscription_data))) + reply = self._rest_conf_client.rpc(url, body=unsubscription_data) + LOGGER.debug('Unsubscription reply: {:s}'.format(str(reply))) + return reply + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send RPC request' + raise Exception(MSG) from e diff --git 
a/src/device/service/drivers/ietf_l3vpn/handlers/__init__.py b/src/device/service/drivers/ietf_l3vpn/handlers/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/device/service/drivers/ietf_l3vpn/handlers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -- GitLab From 8cc17e45cea59e2186185a4eb8e63cf3a8da94e6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 12:00:41 +0000 Subject: [PATCH 217/367] Device component - IETF Slice Driver: - Code cleanup --- src/device/service/drivers/ietf_slice/IetfSliceDriver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py index ddbc4dfdc..0ad01846d 100644 --- a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -164,7 +164,7 @@ class IetfSliceDriver(_Driver): self.tac.create_slice(resource_value) results.append((resource_key, True)) except Exception as e: - MSG = 'Unhandled error processing {:s}: resource_key({:s})' + MSG = 'Unhandled error processing SET {:s}: resource_key({:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results @@ -190,7 +190,7 @@ class IetfSliceDriver(_Driver): self.tac.delete_slice(slice_name) results.append((resource_key, True)) except Exception as e: - MSG = 'Unhandled error processing {:s}: resource_key({:s})' + MSG = 'Unhandled error processing DELETE {:s}: resource_key({:s})' LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) results.append((resource_key, e)) return results -- GitLab From d45bdce10dad289830198cf2d56efdcb02097af1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 12:46:36 +0000 Subject: [PATCH 218/367] Device component - IETF L3VPN Driver: - Added missing import --- src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 8181602d3..835b186a5 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -13,7 +13,7 @@ # limitations under the License. 
-import anytree, json, logging, re, threading +import anytree, copy, json, logging, re, threading from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.tools.rest_conf.client.RestConfClient import RestConfClient -- GitLab From ef9ff5eea80d2a1f7b2971b917cd920b365ce9f1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 13:19:25 +0000 Subject: [PATCH 219/367] NBI Component - SSE Telemetry Connector: - Corrected database insert method --- src/nbi/service/sse_telemetry/database/Subscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py index 339390690..f60ac2b2a 100644 --- a/src/nbi/service/sse_telemetry/database/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/Subscription.py @@ -40,7 +40,7 @@ def set_subscription(db_engine: Engine, request: SSESubsciprionDict) -> None: def callback(session: Session) -> bool: stmt = insert(SSESubscriptionModel).values([request]) stmt = stmt.on_conflict_do_update( - index_elements=[SSESubscriptionModel.id], + index_elements=[SSESubscriptionModel.uuid], set_=dict( uuid=stmt.excluded.uuid, identifier=stmt.excluded.identifier, -- GitLab From a31ca84ed9774e01f398071054f14527dbb0982d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 13:19:58 +0000 Subject: [PATCH 220/367] ECOC F5GA Telemetry Demo: - Corrected network slice definitions --- .../data/slices/network-slice1.json | 22 ++-- .../data/slices/network-slice2.json | 36 +++--- .../data/slices/old/network-slice1.json | 118 ++++++++++++++++++ .../data/slices/old/network-slice2.json | 118 ++++++++++++++++++ 4 files changed, 264 insertions(+), 30 deletions(-) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice1.json create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice2.json diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index 121e20de5..33de80995 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -12,8 +12,7 @@ "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} @@ -34,17 +33,16 @@ "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, - {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, - {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, - {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": 
"ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} ], "target-connection-group-id": "line1" }]}, "attachment-circuits": {"attachment-circuit": [{ - "id": "AC POP2 to VMa", - "description": "AC POP2 connected to VMa", + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", "ac-node-id": "POP2", "ac-tp-id": "200" }]} @@ -67,12 +65,12 @@ { "metric-type": "ietf-network-slice-service:one-way-delay-maximum", "metric-unit": "milliseconds", - "bound": "10" + "bound": "20" }, { "metric-type": "ietf-network-slice-service:one-way-bandwidth", "metric-unit": "Mbps", - "bound": "5000" + "bound": "1000" }, { "metric-type": "ietf-network-slice-service:two-way-packet-loss", @@ -93,12 +91,12 @@ { "metric-type": "ietf-network-slice-service:one-way-delay-maximum", "metric-unit": "milliseconds", - "bound": "20" + "bound": "10" }, { "metric-type": "ietf-network-slice-service:one-way-bandwidth", "metric-unit": "Mbps", - "bound": "1000" + "bound": "5000" }, { "metric-type": "ietf-network-slice-service:two-way-packet-loss", diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index 2d10b9693..80f23c3ef 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -1,8 +1,8 @@ { "slice-service": [ { - "id": "slice1", - "description": "network slice 1, PC1-VM1", + "id": "slice2", + "description": "network slice 2, PC1-VM2", "sdps": { "sdp": [ { @@ -13,12 +13,12 @@ "index": 1, "match-type": [ {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, - {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} ], - "target-connection-group-id": "line1" + "target-connection-group-id": "line2" }]}, "attachment-circuits": {"attachment-circuit": [{ "id": "AC ONT1", @@ -29,22 +29,22 @@ }, { "id": "2", - "node-id": "POP1", - "sdp-ip-address": ["172.16.204.221"], + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.220"], "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, - {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, - {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} ], - "target-connection-group-id": "line1" + "target-connection-group-id": "line2" }]}, "attachment-circuits": {"attachment-circuit": [{ - "id": "AC POP1 to VMb", - "description": "AC POP1 connected to VMb", + "id": "AC POP1 
to VM2", + "description": "AC POP1 connected to VM2", "ac-node-id": "POP1", "ac-tp-id": "200" }]} @@ -54,7 +54,7 @@ "connection-groups": { "connection-group": [ { - "id": "line1", + "id": "line2", "connectivity-type": "point-to-point", "connectivity-construct": [ { @@ -72,12 +72,12 @@ { "metric-type": "ietf-network-slice-service:one-way-bandwidth", "metric-unit": "Mbps", - "bound": "5000" + "bound": "7000" }, { "metric-type": "ietf-network-slice-service:two-way-packet-loss", "metric-unit": "percentage", - "percentile-value": "0.001" + "percentile-value": "0.0001" } ] } @@ -98,12 +98,12 @@ { "metric-type": "ietf-network-slice-service:one-way-bandwidth", "metric-unit": "Mbps", - "bound": "1000" + "bound": "4000" }, { "metric-type": "ietf-network-slice-service:two-way-packet-loss", "metric-unit": "percentage", - "percentile-value": "0.001" + "percentile-value": "0.0001" } ] } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice1.json new file mode 100644 index 000000000..121e20de5 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice1.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "slice1", + "description": "network slice 1, PC1-VM1", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VMa", + "description": "AC POP2 connected to VMa", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" 
+ } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice2.json new file mode 100644 index 000000000..5afa18c7f --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/old/network-slice2.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "slice2", + "description": "network slice 2, PC1-VM2", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.220"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.204.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP1 to VM2", + "description": "AC POP1 connected to VM2", + "ac-node-id": "POP1", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line2", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" 
+ }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file -- GitLab From 80115f0158192d06402de3b5ac82e6ee5609960f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 14:29:22 +0000 Subject: [PATCH 221/367] ECOC F5GA Telemetry Demo: - Corrected network slice definitions --- .../ecoc25-f5ga-telemetry/data/slices/network-slice1.json | 6 ++++-- .../ecoc25-f5ga-telemetry/data/slices/network-slice2.json | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index 33de80995..786a6df35 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -12,6 +12,7 @@ "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, @@ -33,6 +34,7 @@ "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, @@ -75,7 +77,7 @@ { "metric-type": "ietf-network-slice-service:two-way-packet-loss", "metric-unit": "percentage", - "percentile-value": "0.001" + "percentile-value": "0.01" } ] } @@ -101,7 +103,7 @@ { "metric-type": "ietf-network-slice-service:two-way-packet-loss", "metric-unit": "percentage", - "percentile-value": "0.001" + "percentile-value": "0.01" } ] } diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index 80f23c3ef..2a4fac447 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -77,7 +77,7 @@ { "metric-type": "ietf-network-slice-service:two-way-packet-loss", "metric-unit": "percentage", - "percentile-value": "0.0001" + "percentile-value": "0.001" } ] } @@ -103,7 +103,7 @@ { "metric-type": "ietf-network-slice-service:two-way-packet-loss", "metric-unit": "percentage", - "percentile-value": "0.0001" + "percentile-value": "0.001" } ] } -- GitLab From 528c3cec28aee663178cb18385d5c9617f38caa7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 14:29:59 +0000 Subject: [PATCH 222/367] NBI Component - SSE Telemetry Connector: - Removed unused/wrong database/model fields --- src/nbi/service/sse_telemetry/create_subscription.py | 4 ---- src/nbi/service/sse_telemetry/database/Subscription.py | 10 ---------- .../sse_telemetry/database/models/Subscription.py | 1 - 3 files changed, 15 deletions(-) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/create_subscription.py index a0f061041..997e7b455 
100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/create_subscription.py @@ -143,10 +143,8 @@ class CreateSubscription(Resource): identifier=r.identifier, uri=r.uri, xpath=xpath_filter, - status=True, main_subscription=False, main_subscription_id=request_identifier, - details=None, ) _ = set_subscription(db, s) @@ -159,10 +157,8 @@ class CreateSubscription(Resource): xpath=request_data['ietf-subscribed-notifications:input'][ 'ietf-yang-push:datastore-xpath-filter' ], - status=True, main_subscription=True, main_subscription_id=None, - details=None, ) _ = set_subscription(db, s) diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py index f60ac2b2a..c42c0aa78 100644 --- a/src/nbi/service/sse_telemetry/database/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/Subscription.py @@ -30,10 +30,8 @@ class SSESubsciprionDict(TypedDict): identifier: str uri: str xpath: str - status: bool main_subscription: bool main_subscription_id: Optional[str] - details: Optional[dict[str, Any]] def set_subscription(db_engine: Engine, request: SSESubsciprionDict) -> None: @@ -46,10 +44,8 @@ def set_subscription(db_engine: Engine, request: SSESubsciprionDict) -> None: identifier=stmt.excluded.identifier, uri=stmt.excluded.uri, xpath=stmt.excluded.xpath, - status=stmt.excluded.status, main_subscription=stmt.excluded.main_subscription, main_subscription_id=stmt.excluded.main_subscription_id, - details=stmt.excluded.details, ), ) stmt = stmt.returning(SSESubscriptionModel) @@ -86,10 +82,8 @@ def get_main_subscription(db_engine: Engine, request: str) -> Optional[SSESubsci identifier=obj.identifier, uri=obj.uri, xpath=obj.xpath, - status=obj.status, main_subscription=obj.main_subscription, main_subscription_id=obj.main_subscription_id, - details=obj.details, ) ) @@ -109,10 +103,8 @@ def get_sub_subscription(db_engine: Engine, request: str) -> List[SSESubsciprion identifier=o.identifier, uri=o.uri, xpath=o.xpath, - status=o.status, main_subscription=o.main_subscription, main_subscription_id=o.main_subscription_id, - details=o.details, ) for o in obj ] @@ -129,10 +121,8 @@ def get_subscriptions(db_engine: Engine) -> List[SSESubsciprionDict]: identifier=obj.identifier, uri=obj.uri, xpath=obj.xpath, - status=obj.status, main_subscription=obj.main_subscription, main_subscription_id=obj.main_subscription_id, - details=obj.details, ) for obj in obj_list ] diff --git a/src/nbi/service/sse_telemetry/database/models/Subscription.py b/src/nbi/service/sse_telemetry/database/models/Subscription.py index 9312a514a..8a246e7a2 100644 --- a/src/nbi/service/sse_telemetry/database/models/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/models/Subscription.py @@ -32,7 +32,6 @@ class SSESubscriptionModel(_Base): identifier = Column(String, nullable=False, unique=False) uri = Column(String, nullable=False, unique=False) xpath = Column(String, nullable=False, unique=False) - status = Column(Boolean, default=False) main_subscription = Column(Boolean, default=False) main_subscription_id = Column(String, nullable=True) -- GitLab From 2d224479370226c531784c4ea131614e23aa00e8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 15:38:06 +0000 Subject: [PATCH 223/367] Tests - Tools - Rest CONF Client: - Fixed expected status codes for rpc() method --- src/common/tools/rest_conf/client/RestConfClient.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/common/tools/rest_conf/client/RestConfClient.py b/src/common/tools/rest_conf/client/RestConfClient.py index c2ce856f6..b11aa73de 100644 --- a/src/common/tools/rest_conf/client/RestConfClient.py +++ b/src/common/tools/rest_conf/client/RestConfClient.py @@ -112,7 +112,7 @@ class RestConfClient(RestApiClient): def rpc( self, endpoint : str, body : Optional[Any] = None, - expected_status_codes : Set[int] = {requests.codes['CREATED']} + expected_status_codes : Set[int] = {requests.codes['OK'], requests.codes['NO_CONTENT']} ) -> Optional[Any]: return super().post( ('/operations/{:s}'.format(endpoint)).replace('//', '/'), body=body, -- GitLab From 1a7aeb0fe00d315ddc879e0965d4cb353bc4f458 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 8 Sep 2025 16:07:17 +0000 Subject: [PATCH 224/367] SIMAP Connector: - Added forced update when detecting devices, links, and services to prevent inconsistencies in partially-discovered endpoints --- .../service/simap_updater/ObjectCache.py | 5 ++++- .../service/simap_updater/SimapUpdater.py | 18 +++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index 7963999de..d8b04f8d4 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -65,8 +65,11 @@ class ObjectCache: self._object_cache : Dict[Tuple[str, str], Any] = dict() def get( - self, entity : CachedEntities, *object_uuids : str, auto_retrieve : bool = True + self, entity : CachedEntities, *object_uuids : str, + auto_retrieve : bool = True, force_update : bool = False ) -> Optional[Any]: + if force_update: self._update(entity, *object_uuids) + object_key = compose_object_key(entity, *object_uuids) if object_key in self._object_cache: return self._object_cache[object_key] diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 10d16ba3a..f580addf2 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -146,7 +146,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) device_uuid = device_event.device_id.device_uuid.uuid - device = self._object_cache.get(CachedEntities.DEVICE, device_uuid) + device = self._object_cache.get(CachedEntities.DEVICE, device_uuid, force_update=True) device_type = device.device_type if device_type in SKIPPED_DEVICE_TYPES: @@ -296,9 +296,9 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() - src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False) + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True ) src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) - dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False) + dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], force_update =True ) dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False) # Skip links that connect two management endpoints @@ -374,9 +374,9 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) 
From 1a7aeb0fe00d315ddc879e0965d4cb353bc4f458 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Mon, 8 Sep 2025 16:07:17 +0000
Subject: [PATCH 224/367] SIMAP Connector:
 - Added forced update when detecting devices, links, and services to prevent
   inconsistencies in partially-discovered endpoints
---
 .../service/simap_updater/ObjectCache.py  |  5 ++++-
 .../service/simap_updater/SimapUpdater.py | 18 +++++++++---------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py
index 7963999de..d8b04f8d4 100644
--- a/src/simap_connector/service/simap_updater/ObjectCache.py
+++ b/src/simap_connector/service/simap_updater/ObjectCache.py
@@ -65,8 +65,11 @@ class ObjectCache:
         self._object_cache : Dict[Tuple[str, str], Any] = dict()
 
     def get(
-        self, entity : CachedEntities, *object_uuids : str, auto_retrieve : bool = True
+        self, entity : CachedEntities, *object_uuids : str,
+        auto_retrieve : bool = True, force_update : bool = False
     ) -> Optional[Any]:
+        if force_update: self._update(entity, *object_uuids)
+
         object_key = compose_object_key(entity, *object_uuids)
         if object_key in self._object_cache:
             return self._object_cache[object_key]
diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py
index 10d16ba3a..f580addf2 100644
--- a/src/simap_connector/service/simap_updater/SimapUpdater.py
+++ b/src/simap_connector/service/simap_updater/SimapUpdater.py
@@ -146,7 +146,7 @@ class EventDispatcher(BaseEventDispatcher):
         LOGGER.info(MSG.format(grpc_message_to_json_string(device_event)))
 
         device_uuid = device_event.device_id.device_uuid.uuid
-        device = self._object_cache.get(CachedEntities.DEVICE, device_uuid)
+        device = self._object_cache.get(CachedEntities.DEVICE, device_uuid, force_update=True)
 
         device_type = device.device_type
         if device_type in SKIPPED_DEVICE_TYPES:
@@ -296,9 +296,9 @@ class EventDispatcher(BaseEventDispatcher):
         te_topo = self._simap_client.network(topology_name)
         te_topo.update()
 
-        src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False)
+        src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True )
         src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False)
-        dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False)
+        dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], force_update =True )
         dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False)
 
         # Skip links that connect two management endpoints
@@ -374,9 +374,9 @@ class EventDispatcher(BaseEventDispatcher):
         te_topo = self._simap_client.network(topology_name)
         te_topo.update()
 
-        src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False)
+        src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True )
         src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False)
-        dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False)
+        dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], force_update =True )
         dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False)
 
         # Skip links that connect two management endpoints
@@ -461,9 +461,9 @@ class EventDispatcher(BaseEventDispatcher):
         #domain_topo = self._simap_client.network(domain_name)
         #domain_topo.update()
 
-        #src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False)
+        #src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True )
         #src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False)
-        #dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False)
+        #dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], force_update =True )
         #dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False)
 
         #try:
@@ -566,9 +566,9 @@ class EventDispatcher(BaseEventDispatcher):
         #domain_topo = self._simap_client.network(domain_name)
         #domain_topo.update()
 
-        #src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], auto_retrieve=False)
+        #src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True )
         #src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False)
-        #dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], auto_retrieve=False)
+        #dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], force_update =True )
         #dst_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[1]), auto_retrieve=False)
 
         #try:
--
GitLab
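
The change above gives ObjectCache.get() a force_update flag that refreshes the cached entry before the lookup, which is what lets the event handlers re-read a partially-discovered device instead of trusting a stale cache entry. A small self-contained sketch of that behaviour follows; ToyObjectCache, the enum stand-in and the fake _update() body are illustrative, only the get() signature and its force_update branch follow the diff.

from enum import Enum
from typing import Any, Dict, Optional, Tuple

class CachedEntities(Enum):           # stand-in for the real enum used by the SIMAP connector
    DEVICE   = 'device'
    ENDPOINT = 'endpoint'

class ToyObjectCache:
    """Toy cache reproducing the get()/force_update behaviour introduced by this patch."""
    def __init__(self) -> None:
        self._object_cache : Dict[Tuple[str, ...], Any] = dict()

    def _update(self, entity : CachedEntities, *object_uuids : str) -> None:
        # The real _update() presumably re-reads the object from the controller; here it is faked.
        self._object_cache[(entity.value, *object_uuids)] = {'uuid': object_uuids[-1], 'fresh': True}

    def get(
        self, entity : CachedEntities, *object_uuids : str,
        auto_retrieve : bool = True, force_update : bool = False
    ) -> Optional[Any]:
        if force_update: self._update(entity, *object_uuids)   # refresh before looking up
        return self._object_cache.get((entity.value, *object_uuids))

cache = ToyObjectCache()
print(cache.get(CachedEntities.DEVICE, 'dev-1'))                      # None: never retrieved
print(cache.get(CachedEntities.DEVICE, 'dev-1', force_update=True))   # freshly (re)loaded entry
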
From 6b535983011da38514d620a7830a952df56dc664 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Tue, 9 Sep 2025 18:21:07 +0000
Subject: [PATCH 225/367] WebUI component:
 - Add new topology icons
---
 .../service/static/topology_icons/client.png    | Bin 18192 -> 0 bytes
 .../static/topology_icons/datacenter.png        | Bin 9417 -> 0 bytes
 .../static/topology_icons/emu-computer.png      | Bin 0 -> 679 bytes
 .../topology_icons/emu-virtual-machine.png      | Bin 0 -> 1922 bytes
 .../service/static/topology_icons/nce-old.png   | Bin 0 -> 19438 bytes
 .../service/static/topology_icons/nce.png       | Bin 19438 -> 3985 bytes
 .../static/topology_icons/optical-fgotn.png     | Bin 0 -> 7939 bytes
 .../static/topology_icons/optical-olt.png       | Bin 0 -> 5403 bytes
 .../static/topology_icons/optical-ont.png       | Bin 0 -> 2213 bytes
 .../static/topology_icons/packet-pop.png        | Bin 0 -> 7194 bytes
 10 files changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 src/webui/service/static/topology_icons/client.png
 delete mode 100644 src/webui/service/static/topology_icons/datacenter.png
 create mode 100644 src/webui/service/static/topology_icons/emu-computer.png
 create mode 100644 src/webui/service/static/topology_icons/emu-virtual-machine.png
 create mode 100644 src/webui/service/static/topology_icons/nce-old.png
 create mode 100644 src/webui/service/static/topology_icons/optical-fgotn.png
 create mode 100644 src/webui/service/static/topology_icons/optical-olt.png
 create mode 100644 src/webui/service/static/topology_icons/optical-ont.png
 create mode 100644 src/webui/service/static/topology_icons/packet-pop.png
zOFLbIclWpN*sFS~bm^rhD-Ig$JgCq$yGSSZelYjHbpB-D3m+IKpOgELRPi^i`KGIO z!m)|1GRK3{uP=UFv8u%C*)vDWZLA%)50@9ry2Wzy$baq$Y^Tp%I_fI$%lw)77uDBq w-|q4I;BF*)@a6mE(=IKo6D^tf_#f+;9zH2)^;35l7#J8lUHx3vIVCg!06#QDQUCw| literal 0 HcmV?d00001 diff --git a/src/webui/service/static/topology_icons/packet-pop.png b/src/webui/service/static/topology_icons/packet-pop.png new file mode 100644 index 0000000000000000000000000000000000000000..e891a083e55f8ac7fcbd35bc5263de7e8894e1b3 GIT binary patch literal 7194 zcmeAS@N?(olHy`uVBq!ia0y~yV9;k^VDRN&V_;x(l;O>0U|?V@4sv&5Sa(k5B?AKk zOS+@4BLl<6e(pbstPBhcoCO|{#S9F5b3mApeM<0M1_tT%o-U3d5$Pn{-f zz2mJ~p;>47WZ$IY+3IE2ZXVfvqrq{-1>dPGOjl0Wmm*!bke(&c0J2RK-^?Xb#KmYx1{*syH=WXBqTW%w)R_vP7^kcEPYS@~B>1|HId(*&-8ncgfjSxyZdrwY4wFv-a@o-h6c3P41xTx&oeaYQG=o z9BlLab3i2YMomhb!?GKO=ZqIVXP;R!Yj*p~)7CFSPqU_c+82HA^D@7W>m}=7EB;@i zl{w3Q(}k+0Y9^fp+J7?Uy-QcybZpO>nqW*ay1Ji_besD;LH<)I8>*&FDAJo&n;}=3;dR1HBY4(> zW$E0d#+#cTBprTte&3Q)$Id>Hjnzm=Vy!SVRNmdd^Xp|qezVY?#`-7h*WX*XY-4=6 zq)Tf-HNRX3ug|uBnTM9%m2MI`)v7t8D?GR1<#f)?F*~c8+pJv#yYHkoTQFYS{Qrff zBA0aKvV1PqC1KoZ?>4&Dnrj>`(p>{^LJYW?$5N9Y5<&dVS5mS2yPOiP&hrbKS)!I+^cQ9NHkL7kA8E z-v8B^b>ThrQYQ|%?mvAc?sNL%{r&32QxB(lM4w1*j+wR7Aa+)hwR-!ZitQD%4op~hr7rcIjCJWEr_bCQ6Tdkf;nz7dtuShi zcUK3~jlC|jp3PO6sm&8{ID1pNQtg|mI~;Q)5`rE}t-cWaWRuz~-u3=P{fiS9$CS@H zbLeV{P#;&kmL8k^n{6udlZwN-I5^HmIO_KnnddwZuKBvOCus{lX#( zcZOWw*8E0rtNQtRWwZIee{t#htrXgxdw@;svDlN;imA1qUUPCEbz62|%B*?3n`9z_o{BRVp`t@A!W|r zi47i1Ngpg3y-wNftbH!G_IHYW>o>cDdKUgUFZSn%ExFiu-aq*Ex8^M>*`ep#t~}ay z|Jc-06PDB~cS{4sOSI1}+spOQ&W+`?m5sgAO$DW}WnXr!vsbzibm+5G=CpV#rV zC#2(~?(!E@PoDK6vh-V5d{+1MINq}>mv4A;HRV&hAG7iK0Fk-(f2h}dY53$~{ndV! zW%jLOD@!g}O#Ro`y7}9(onGZfE=5M?JX>(l|6$43El(Eeb_X4ua=~YRNX$!@LTTMl zqeI_gg>5HgKKdi_@2`edYnFGb*_ktu>3Pd~e%>wM$V|C8T_V!5{@j;?z6|GfGBDj~ zQ2gAqS#|k`r9#e4&Kc`W?uDu=G#W15&oF1RT9wMhitj3$0$#40shu3wAO2E1pGzxN zRdUz1F44REaWX^q+20!MrPHhyi_J>8B%?5)r*O^`InVwR!l4~9cY0iuo-3@dd|x7e zK&pV@{0F~98_yYTG1%9$D6s1#m-I>=@vq0$S|3a9^qafe#hSmyG4<>0jNA9lzdU!h z`pN3Ze|OnGFwg0@ZT4QgCu*C_f|YTdkE*{l>n@r9LeBqk&16l3t!-V`a$dRFsl9)p zRCt)pwjt5Ts=D=K;G&uk#=2iZpL%2U_?|M~;OS9gbKJCYQ|9K1vf~H!U#y*ZkV!{w zH6!!4uF~5_mtEbEe_5T)cg7Bv+m#P2YBt6HP~i!f9d*fRa?PX{&6Qb&3eBp{?~{Fx ztj>5^wX47BIQyl|{u6{wO1bu{zmbW*Q}UkOWvi7D5&To(DErm?l`1G9xs_?A5JOi#6G zJr{Q^Sm)iMxjfF#=ncPJO6YIDst+F~o{;@xA8s{=ODZ%hx5-*Zo;GeOu3*!_y`yJNWaRJ;;|*T;O=R8Td2RmgT3tA0^_NG#S3Pvw)~{abeAwIbYV+rt2iEvL&Mp*v>)9zfS9-GcuDt(O z4n^nxS*g!>`uR+4Q{81;Z)1YhzrNoXXYlXKVGhgkc{{zTF9$8HZ}BO;?zC`k%aec&g+&Qx5Vw`zy_U&-5MQ*Oj8o~^lM?z3~R_V;e<{+k{w{Z)He z*4N^!t-m6bXCL&GjNEWuO@Vo7xlz_vizg9&UuyY<+8<8v{2a`A_Mr2UqkP`2dZ+Jr zKF@i$s4n{g-z-g?<^O6DqUJtXtv`K^z-QwLAJP{rdT=fHLhlXu<8El9Sy*Snfe zR{h`Fw7#g;ol0esR(kzbWBGCQ_xs(p`H$o_|2dP>GR1VmZQE5G+w)g0>4{K_)IT*P z;8OUqsZl&+?h`#rum0)2dlmd(+QCZT=zvcl&5YM=i7^W=f6L^=YP_i zhqtab<|h~MS++egxZ>EiqFqci+-u!L)<+5uZZve_RPlVa;Ixn9Bub& zYR+XU*4(zDb=m@%#QL+1nR6M0_s@OduisO$TVqDC+}BP`q1+P*XMX9Y+Z^ASsArHa zw#ru9AoO7T#Gk!i(pLv;N$4?t$M!5IeOax$-^99?Sy2mg)wXxkT)ehHzG|`Rb(`DY zWQEtxQc?+LkgB(rwoDQ%diz;^hlJx94NGH-vRB%L43~RnZ=Y!0ACP<6?9$@ukM94| zn|Y3YOV4;#a`NQfw71vZHUIqW>Tp87{>827mj72cL@w8ka9*mRe@^LdVO4UHn!Ep; zCSA zw^jP9KSvJE{M@En-N$t-Tu}31=2a1aaJ54df5+`J;o&qYo_|BhN`KD32~C%>Ui4pX z7FhJs**O04)@Xz2$HQaJ<=!ftaVfyMO)b2usgr->v9EC#clp`~Z(h@QZ-$V9mPz!C zJde!lE)hb?4}2 zW7}k|lX@z%V;DKJx4&6_yZpU$zv8aWQ`z=&PEOq@e&(>Qkh|plJBl+m`|eEHSMpS9 zZRGxF9-HM0BR3p-F5@BI6=hWamCcGd`t*?~xnGagXw0}_?rbd1o)VPv&THdEy$b=~ zJ}%73{x|#g%7+>qar31*PMtQWO1QP_zr|SxUwMWc$s-FRMI_?;&go5$5u9}UyZmBF z?%<;j`e%NN;|Z~`?O}cP=lLBDFRfh1+rg~?J`!_w_H->jAHOIjrqbkOL!-(=i)(Jv z=7`_kR(v}>Y1v|~v%<a&oaVtM=Z>CNIt8GZN}qjvMxd72h%5c74&D4JRz 
z<<4S%n)z3B#~LmdQTY?od!=_M=DurWvlJ^l-Fx%tYV+RX0(V(u3MOCJQpf-Fdf9=T z%10B{dNIr4?5CT_kMO0Q2q5u{GX1!O?fFP zDtjT?*LlmA)IOHEA$LQ!X)OKLw0xOJsfS?Kf;o2L%l{|&O0@SWKQ}s(*=SlnOHe21 zKEuiGu3d2+tM3{Z?cAiSy{+Qyu8yU=<+g#bJCBP@R^DW7Eb?voVy1~fclWr?P-cEs z$=>rc;LYKhC9gG;EwdEn9Z{XOefNUb-WR5>$>#A`yH{l1Z5JnHg9+>lZED{;y_3n4 z*KBIY*ENnmai;g8QO=T8H%@HRU)kLi*RlR(mG0iW#~QJot9?@(FE2dNv{hu@ZI5jS zD>?Rx6jrA*`9I-!oyK=EUgBlp{U`Qocprayzi7#;Z}I9OIR^eKUU+rIT-&0lw0>fZ zaJDPwcf%}KvzZ)6O$_9py6W?lZ#T=U4O*0bB{uf_QQhdj{D!Kp!xTJM)jPU}O95eHK+wm!@y8Iv!pUqBf;`PTZWx`a@6nujNbC|2lX@ zH!;iicK+3)x%*B3Sx?w8)2O^>Zpy+->5Hl@8vT;&U%gW+UTSvxjOS*F)5l|07R$X} zGTX>+;ok#i;=YR;AAeDHW{%ctpR4&qc*KW$p(u~eY7mvCu(Csn^&PbbWmHF?-GZ**OEVf!9@fmv4fU)%L{4u)Z? z&N{8kGz&jEZGnIP#()eP-8ox}b!5K?Cp7Gzb?aq)a@MN4a~h|WZ&6ywW{>_0eOZv0Eb_}!;PuHoB1<*{&^qE5|8a}E`&wr_~ZEOTi4vN3$o7nf5u2M^tI|54L$ zzWUj@UC}$D#m|PWnX~P*lcaRU$}4`xp)S$=rCpmPY}KE|^B%gMm+^Fq7XQa5eiIVs z%w`Yk5KnjAeL&~rDSh8rHujN0t|DHHC$9z>a5bn}u2%iO#Ooxd*TOqf)D|sxu;`FL z+@1lA zYYN`u@>)AtO!c~l+^nD=w`bmE3vUYfSFZVEoN?_?NanKmO-FwJxOz^dyhV(~q~hF$ zD(wSIpN%hnv{8?ZmJsErmDY2#^{!{DQWG4K5>-<^e zxhm@uvAuI_lDk!x?s6#V)qC{Hp{Ud85nrrVadS~Zi_fE%Lfke-mE+EI-we>P(EjPWPx7DNI9ZC3Jdhev)yZ~J*6)1neY0ZZ%d{JXzlIs zU#Ik&BKO|b!Jd$xuUArKXW8%H@pX%nhe6Ut{mt4no@F{11#NwO3{_31h zJp6QhdWhoDMS)SRb^pw&X8k+z*S4I+=HCyou8pep_LcLtz5PAMOWKSxmB%pu1JCr! zIs*D(Mw@G7rM9eRoqsq$W%s5~8|76G<4&DA^f@P1FF;<~CjZH#nSm*5l&1YNP;H2t z|Hl8ZK=J9YwacIRf4g|>uKNq)FMi_tc51OX+)?>pvD*Hlz`Eqqj8RdS9SeW*icF1k zubY*m7?DsjGho)s8!NdVu}W6AIlo}%)YlJdI$bK)R3yIg*u#$*W==t?Ggp-h`MUnO zeIP0CMbV|iy<2C0zxeOG|7FiFm#6RBtLU`Od}Gu72fvNQ^^{r8U;b8m{K$Ww>-qN< zxwK5Fov`rzvp=7%9Fo2>M|Bxn&PKJm((Eno7-P(`TUt!@Bk&jOc~Wfo^zt)7Ss_=9FB=(Gw<|u5$b@)E^a#iIg}* zeAsQPUSVidE*iU|v8&0W<-IF?0yyxSBS?%6?JD;Dik+UdzWb@X#s%zW3 zzjLG(_z1mYxX*qpBh1Y&iz!t>r(KRa@4(NcSGGJ1X58H%b+G*QzsFCwchqvTNj?-% zjq})S$dmY4#PIy(uPsfcB`-gjR<$@wTw-$;`8SVU^Uv+AueZOvagzJWrcC|`OXD7t zdlW78a48Z>auv&%ewi;O#Ei|~#(njcFS|Ye9lE5mWyQ>%Q+887Bwb-#t;?`xu1&co z$EoR>3E+i!=ulUHsh1U}kzXl7aEMC)^_DkgwhqdWm#y{z+ zdkeY_K3%nV)z;`t+j)0eFFn%O@cYiu{FkS`Mwfg(7i;OY>!sbEt-@TPrul--Z)Uui zdMMaNQaq#igK$Q2P3zyiEz^^yI&0XhowaM5yh!^ztN*;BS4ufU9(tdgAM^M_wvh4j z2q|8XN7KLExnaQ^xu;ZhiJRoaW|ystlGROfZYalD7%+TUZe9NV@-Nx;2Q#@8YYMW~ z&Ty&IfB5sS`0k1N^2rwbDbE+D+}QPe#)kufEvM%k;0ZkaVac%uotsR0l^_3~X?N~V zp058eLof z!LNdQ4|b$;JQGUh+S3|0EzMZS^~vPwD@L1~H0{_@PllYYh?*1j{mG%zp- zm~FP~@QG~`cW4{$>YbqI6i^{^A-m(Jx=5T~P~(BSN1pnI?998pVB^W@{gbc9wt1u+ zEsl=|b27V+Pcx}=WgZ(`@o z_&h~?+TAnle&2c)Uk?oZCv3)F-m}PN$-A#7T7C!A%(W;F6bap)cVGC#!KN?jb$2VI z?rCM;;4pEV+}FKdKzznecJ*oH;Zxsm+Mg|2{Gjc~x8@6O8jYuquL~>s&$-&i&EAqt zJijMc$Jc$p4^*3PQ9Y6G!Op$y{tEsQ zUM>0i_Uak!exn*a<%y1s|0a3;nzO&Q9c+2Oze>65$2kcunZ+@$|D08ub8nuQN|)L7 z7fh$tWI4~=GvDlumD7AC-ygh!+GUcwJn8TL9m%)zZg85DSh-JrQrvarJ%u9n4nLl} zTIa`6e=dzNODHBntmFR0Z{q(7yG4ydrdm2_t}ETK%EYzOhEwkG#C`qSBz(&RSs9K` zXH@#RR(QI&QSAQG_M+ETFP&3j+nyW5TzfBKw{DGT^xXRuDmMFDYrcd&5!yLt&Y@37 zPc+>uiP zhjE-PFj02q?q~a>d_7j})uLn9g|w~Rj!oUZ+eOqQE17Hdwc{ctcRSV}agDy`w{T~6 zhN4j0@?}$Um^U}Rf3ZMp3CEAu`X2(iS;a3mZPHjSUJ>q*!F6Ca|z#qGa_mI7!>-24V)@nzZo9^3l zSZ(7PU9EkK7F({{^mfglfrEGD(!7!M(U`Nos?G;U$Mu$ys5} z``a(XaG&HlwR-!9Ki^imSNK;|PWin!Q8|{+*8b4Rvl^HF^SoSryIno3Tvct#+I;U7 zFYe5axmu~SaM9~M@m_CJkE@=}yT$g*rXXUDcVGAC4Mm=fF3(!3Ri`vf+IzZ(ahm)~ zKmFcMJIr%heqR%J)2L6k`Sjj7tMq#Du20*{ADy~&_EOgM^XDRVp5&h}M`rh))XG4>Rz7rn=N%`1f1@t7{ojA3J9pC=_|~gmFWWOaW_w{s)!k!~E4ND9EQl!Z@N53P r{@BldH;)|o!ut5}-V;Zj>}On-XZ81)YgzyU0|SGntDnm{r-UW|OO@(1 literal 0 HcmV?d00001 -- GitLab From f54968424747de2fbd015e23e4ab8666f5df764e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:21:27 +0000 Subject: [PATCH 226/367] Common: - Add new 
Device Types --- src/common/DeviceTypes.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 7698097f8..008cb448d 100644 --- a/src/common/DeviceTypes.py +++ b/src/common/DeviceTypes.py @@ -21,7 +21,10 @@ class DeviceTypeEnum(Enum): # Emulated device types EMULATED_CLIENT = 'emu-client' + EMULATED_COMPUTER = 'emu-computer' EMULATED_DATACENTER = 'emu-datacenter' + EMULATED_VIRTUAL_MACHINE = 'emu-virtual-machine' + EMULATED_IP_SDN_CONTROLLER = 'emu-ip-sdn-controller' EMULATED_MICROWAVE_RADIO_SYSTEM = 'emu-microwave-radio-system' EMULATED_OPEN_LINE_SYSTEM = 'emu-open-line-system' @@ -41,9 +44,13 @@ class DeviceTypeEnum(Enum): NCE = 'nce' MICROWAVE_RADIO_SYSTEM = 'microwave-radio-system' OPEN_LINE_SYSTEM = 'open-line-system' + OPTICAL_OLT = 'optical-olt' + OPTICAL_ONT = 'optical-ont' + OPTICAL_FGOTN = 'optical-fgotn' OPTICAL_ROADM = 'optical-roadm' OPTICAL_TRANSPONDER = 'optical-transponder' P4_SWITCH = 'p4-switch' + PACKET_POP = 'packet-pop' PACKET_RADIO_ROUTER = 'packet-radio-router' PACKET_ROUTER = 'packet-router' PACKET_SWITCH = 'packet-switch' -- GitLab From 0f5bc697b4d68fa52fe53a7e25d976a56802184d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:22:09 +0000 Subject: [PATCH 227/367] Device component: - Fix Driver mapping rules --- src/device/service/drivers/__init__.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index bf422130a..395452bde 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -124,7 +124,10 @@ if LOAD_ALL_DEVICE_DRIVERS: (OpenConfigDriver, [ { # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, + FilterFieldEnum.DEVICE_TYPE: [ + DeviceTypeEnum.PACKET_POP, + DeviceTypeEnum.PACKET_ROUTER, + ], FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, } ])) @@ -135,7 +138,10 @@ if LOAD_ALL_DEVICE_DRIVERS: (GnmiOpenConfigDriver, [ { # Real Packet Router, specifying gNMI OpenConfig Driver => use GnmiOpenConfigDriver - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, + FilterFieldEnum.DEVICE_TYPE: [ + DeviceTypeEnum.PACKET_POP, + DeviceTypeEnum.PACKET_ROUTER, + ], FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, } ])) -- GitLab From 94461d98db64cf238ccb66d2b9c822582017cd65 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:22:43 +0000 Subject: [PATCH 228/367] NBI component - IETF Network: - Fix network composition rules for new device types --- src/nbi/service/ietf_network/ComposeNetwork.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/nbi/service/ietf_network/ComposeNetwork.py b/src/nbi/service/ietf_network/ComposeNetwork.py index 8d62525ad..fb32e385b 100644 --- a/src/nbi/service/ietf_network/ComposeNetwork.py +++ b/src/nbi/service/ietf_network/ComposeNetwork.py @@ -24,10 +24,11 @@ from .NetworkTypeEnum import NetworkTypeEnum, get_network_topology_type LOGGER = logging.getLogger(__name__) IGNORE_DEVICE_TYPES = { - DeviceTypeEnum.CLIENT.value, - DeviceTypeEnum.DATACENTER.value, DeviceTypeEnum.EMULATED_CLIENT.value, + DeviceTypeEnum.EMULATED_COMPUTER.value, DeviceTypeEnum.EMULATED_DATACENTER.value, + DeviceTypeEnum.EMULATED_VIRTUAL_MACHINE.value, + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER, DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value, 
DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value, -- GitLab From ea9d59dc315b625040645e6d850739486329bb97 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:23:10 +0000 Subject: [PATCH 229/367] PathComp component - FrontEnd: - Fix Algorithms for new device types --- .../frontend/service/algorithms/_Algorithm.py | 10 ++++++--- .../algorithms/tools/ResourceGroups.py | 21 ++++++++++++------- .../service/algorithms/tools/ServiceTypes.py | 2 +- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index a6dc5cb1d..fd389d321 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -264,9 +264,13 @@ class _Algorithm: self.logger.debug('path_hops = {:s}'.format(str(path_hops))) device_types = {v[0]['device_type'] for k,v in self.device_dict.items()} DEVICES_BASIC_CONNECTION = { - DeviceTypeEnum.DATACENTER.value, DeviceTypeEnum.EMULATED_DATACENTER.value, - DeviceTypeEnum.CLIENT.value, DeviceTypeEnum.EMULATED_CLIENT.value, - DeviceTypeEnum.PACKET_ROUTER.value, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, + DeviceTypeEnum.EMULATED_CLIENT.value, + DeviceTypeEnum.EMULATED_COMPUTER.value, + DeviceTypeEnum.EMULATED_DATACENTER.value, + DeviceTypeEnum.EMULATED_VIRTUAL_MACHINE.value, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, + DeviceTypeEnum.PACKET_POP.value, + DeviceTypeEnum.PACKET_ROUTER.value, } self.logger.debug('device_types = {:s}'.format(str(device_types))) self.logger.debug('DEVICES_BASIC_CONNECTION = {:s}'.format(str(DEVICES_BASIC_CONNECTION))) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index cbc0ecf88..6a6965013 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -20,10 +20,10 @@ from common.tools.grpc.Tools import grpc_message_to_json_string DEVICE_TYPE_TO_DEEPNESS = { - DeviceTypeEnum.EMULATED_DATACENTER.value : 90, - DeviceTypeEnum.DATACENTER.value : 90, DeviceTypeEnum.EMULATED_CLIENT.value : 90, - DeviceTypeEnum.CLIENT.value : 90, + DeviceTypeEnum.EMULATED_COMPUTER.value : 90, + DeviceTypeEnum.EMULATED_DATACENTER.value : 90, + DeviceTypeEnum.EMULATED_VIRTUAL_MACHINE.value : 90, DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value : 80, DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value : 80, @@ -33,6 +33,7 @@ DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.EMULATED_PACKET_ROUTER.value : 70, + DeviceTypeEnum.PACKET_POP.value : 70, DeviceTypeEnum.PACKET_ROUTER.value : 70, DeviceTypeEnum.EMULATED_PACKET_SWITCH.value : 60, @@ -49,14 +50,18 @@ DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value : 30, DeviceTypeEnum.OPEN_LINE_SYSTEM.value : 30, - DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER.value : 10, - DeviceTypeEnum.PACKET_RADIO_ROUTER.value : 10, - DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value : 10, - DeviceTypeEnum.OPTICAL_TRANSPONDER.value : 10, DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value : 10, + DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value : 10, + DeviceTypeEnum.OPEN_ROADM.value : 10, + DeviceTypeEnum.OPTICAL_FGOTN.value : 10, + DeviceTypeEnum.OPTICAL_OLT.value : 10, + DeviceTypeEnum.OPTICAL_ONT.value : 10, DeviceTypeEnum.OPTICAL_ROADM.value : 10, + DeviceTypeEnum.OPTICAL_TRANSPONDER.value : 10, + + DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER.value : 10, + 
DeviceTypeEnum.PACKET_RADIO_ROUTER.value : 10, DeviceTypeEnum.QKD_NODE.value : 10, - DeviceTypeEnum.OPEN_ROADM.value : 10, DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER.value : 0, DeviceTypeEnum.NETWORK.value : 0, # network out of our control; always delegate diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index 6df4ea95c..5943bf1ba 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -13,7 +13,6 @@ # limitations under the License. -from typing import Optional from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ServiceTypeEnum @@ -25,6 +24,7 @@ PACKET_DEVICE_TYPES = { DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, DeviceTypeEnum.IETF_SLICE, DeviceTypeEnum.NCE, DeviceTypeEnum.IP_SDN_CONTROLLER, DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER, + DeviceTypeEnum.PACKET_POP, DeviceTypeEnum.PACKET_ROUTER, DeviceTypeEnum.EMULATED_PACKET_ROUTER, DeviceTypeEnum.PACKET_SWITCH, DeviceTypeEnum.EMULATED_PACKET_SWITCH, } -- GitLab From e4af25c97b2b3061debd678a52ea892cfc08dcb6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:23:39 +0000 Subject: [PATCH 230/367] Service component - L3NM gNMI OpenConfig Service Handler: - Fix for new device types --- .../l3nm_gnmi_openconfig/ConfigRuleComposer.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py index a1857c3b2..cf0eacab5 100644 --- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py @@ -225,7 +225,11 @@ class DeviceComposer: self.static_routes.setdefault(prefix, dict())[metric] = next_hop def get_config_rules(self, network_instance_name : str, delete : bool = False) -> List[Dict]: - SELECTED_DEVICES = {DeviceTypeEnum.PACKET_ROUTER.value, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value} + SELECTED_DEVICES = { + DeviceTypeEnum.PACKET_POP.value, + DeviceTypeEnum.PACKET_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value + } if self.objekt.device_type not in SELECTED_DEVICES: return [] json_config_rule = json_config_rule_delete if delete else json_config_rule_set -- GitLab From 208b00187f32d29b809c90fa976f5eb7a910b48c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:26:58 +0000 Subject: [PATCH 231/367] Common - Tools - Rest Conf - Implemented Callbacks to be executed when selected paths are changed - Renamed Dispatcher to DispatcherData --- .../tools/rest_conf/server/requirements.in | 1 + .../server/restconf_server/Callbacks.py | 134 ++++++++++++++++++ .../{Dispatch.py => DispatchData.py} | 47 ++++-- .../RestConfServerApplication.py | 14 +- 4 files changed, 183 insertions(+), 13 deletions(-) create mode 100644 src/common/tools/rest_conf/server/restconf_server/Callbacks.py rename src/common/tools/rest_conf/server/restconf_server/{Dispatch.py => DispatchData.py} (79%) diff --git a/src/common/tools/rest_conf/server/requirements.in b/src/common/tools/rest_conf/server/requirements.in index 17155ed58..f29c2d180 100644 --- a/src/common/tools/rest_conf/server/requirements.in +++ b/src/common/tools/rest_conf/server/requirements.in @@ -13,6 +13,7 @@ # limitations under the License. 
cryptography==39.0.1 +deepdiff==6.7.* eventlet==0.39.0 Flask-HTTPAuth==4.5.0 Flask-RESTful==0.3.9 diff --git a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py new file mode 100644 index 000000000..16194fd81 --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py @@ -0,0 +1,134 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, re +from typing import Dict, List, Optional, Union + + +LOGGER = logging.getLogger(__name__) + + +class _Callback: + def __init__(self, path_pattern : Union[str, re.Pattern]) -> None: + ''' + Initialize a Callback + @param path_pattern: A regular expression (string or compiled `re.Pattern`) + ''' + if isinstance(path_pattern, str): + path_pattern = re.compile('^{:s}/?$'.format(path_pattern)) + self._path_pattern = path_pattern + + def match(self, path : str) -> Optional[re.Match]: + ''' + Match method used to check if this callback should be executed. + @param path: A RESTCONF request path to test + @returns `re.Match` object if pattern fully matches `path`, otherwise `None` + ''' + return self._path_pattern.fullmatch(path) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + ''' + Execute the callback action for a matched path. + This method should be implemented for each specific callback. + @param match: `re.Match` object returned by `match()`. + @param path: Original request path that was matched. 
+ @param old_data: Resource representation before change, if applicable, otherwise `None` + @param new_data: Resource representation after change, if applicable, otherwise `None` + @returns boolean indicating whether additional callbacks should be executed, defaults to False + ''' + MSG = 'match={:s}, path={:s}, old_data={:s}, new_data={:s}' + msg = MSG.format(match.groupdict(), path, old_data, new_data) + raise NotImplementedError(msg) + + +class CallbackDispatcher: + def __init__(self): + self._callbacks : List[_Callback] = list() + + def register(self, callback : _Callback) -> None: + self._callbacks.append(callback) + + def dispatch( + self, path : str, old_data : Optional[Dict] = None, new_data : Optional[Dict] = None + ) -> None: + LOGGER.warning('Checking Callbacks for path={:s}'.format(str(path))) + for callback in self._callbacks: + match = callback.match(path) + if match is None: continue + keep_running_callbacks = callback.execute(match, path, old_data, new_data) + if not keep_running_callbacks: break + + +# ===== EXAMPLE ========================================================================================== + +class CallbackOnNetwork(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/ietf-network:networks/network=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + print('[on_network]', match.groupdict(), path, old_data, new_data) + return False + +class CallbackOnNode(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/ietf-network:networks/network=(?P[^/]+)' + pattern += r'/node=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + print('[on_node]', match.groupdict(), path, old_data, new_data) + return False + +class CallbackOnLink(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/ietf-network:networks/network=(?P[^/]+)' + pattern += r'/ietf-network-topology:link=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + print('[on_link]', match.groupdict(), path, old_data, new_data) + return False + +def main() -> None: + callbacks = CallbackDispatcher() + callbacks.register(CallbackOnNetwork()) + callbacks.register(CallbackOnNode()) + callbacks.register(CallbackOnLink()) + + callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin') + callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/node=P-PE2') + callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/ietf-network-topology:link=L6') + callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/') + callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/node=P-PE1/') + callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/ietf-network-topology:link=L4/') + +if __name__ == '__main__': + main() diff --git a/src/common/tools/rest_conf/server/restconf_server/Dispatch.py b/src/common/tools/rest_conf/server/restconf_server/DispatchData.py similarity index 79% rename from src/common/tools/rest_conf/server/restconf_server/Dispatch.py rename to src/common/tools/rest_conf/server/restconf_server/DispatchData.py index 319aa9f7b..f0e811534 100644 --- 
a/src/common/tools/rest_conf/server/restconf_server/Dispatch.py +++ b/src/common/tools/rest_conf/server/restconf_server/DispatchData.py @@ -13,18 +13,22 @@ # limitations under the License. -import json, logging +import deepdiff, json, logging from flask import Response, abort, jsonify, request from flask_restful import Resource +from .Callbacks import CallbackDispatcher from .HttpStatusCodesEnum import HttpStatusCodesEnum from .YangHandler import YangHandler LOGGER = logging.getLogger(__name__) -class RestConfDispatch(Resource): - def __init__(self, yang_handler : YangHandler) -> None: +class RestConfDispatchData(Resource): + def __init__( + self, yang_handler : YangHandler, callback_dispatcher : CallbackDispatcher + ) -> None: super().__init__() self._yang_handler = yang_handler + self._callback_dispatcher = callback_dispatcher def get(self, subpath : str = '/') -> Response: data = self._yang_handler.get(subpath) @@ -66,6 +70,10 @@ class RestConfDispatch(Resource): LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + self._callback_dispatcher.dispatch( + '/restconf/data/' + subpath, old_data=None, new_data=json_data + ) + response = jsonify({'status': 'created'}) response.status_code = HttpStatusCodesEnum.SUCCESS_CREATED.value return response @@ -78,8 +86,10 @@ class RestConfDispatch(Resource): LOGGER.exception('Invalid JSON') abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') + old_data = self._yang_handler.get(subpath) + try: - json_data = self._yang_handler.update(subpath, payload) + new_data = self._yang_handler.update(subpath, payload) except Exception as e: LOGGER.exception('Update failed') abort( @@ -87,8 +97,14 @@ class RestConfDispatch(Resource): description=str(e) ) - LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - updated = False # TODO: compute if create or update + LOGGER.info('[PUT] {:s} {:s} => {:s}'.format(subpath, str(payload), str(new_data))) + + diff_data = deepdiff.DeepDiff(old_data, new_data) + updated = len(diff_data) > 0 + + self._callback_dispatcher.dispatch( + '/restconf/data/' + subpath, old_data=old_data, new_data=new_data + ) response = jsonify({'status': ( 'updated' if updated else 'created' @@ -108,8 +124,10 @@ class RestConfDispatch(Resource): LOGGER.exception('Invalid JSON') abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, desctiption='Invalid JSON') + old_data = self._yang_handler.get(subpath) + try: - json_data = self._yang_handler.update(subpath, payload) + new_data = self._yang_handler.update(subpath, payload) except Exception as e: LOGGER.exception('Update failed') abort( @@ -117,7 +135,14 @@ class RestConfDispatch(Resource): description=str(e) ) - LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) + LOGGER.info('[PATCH] {:s} {:s} => {:s}'.format(subpath, str(payload), str(new_data))) + + #diff_data = deepdiff.DeepDiff(old_data, new_data) + #updated = len(diff_data) > 0 + + self._callback_dispatcher.dispatch( + '/restconf/data/' + subpath, old_data=old_data, new_data=new_data + ) response = jsonify({'status': 'patched'}) response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value @@ -126,6 +151,8 @@ class RestConfDispatch(Resource): def delete(self, subpath : str) -> Response: # NOTE: client should provide identifier of element to be patched + old_data = self._yang_handler.get(subpath) + try: deleted_node = self._yang_handler.delete(subpath) except Exception as e: @@ -143,6 +170,10 @@ class 
RestConfDispatch(Resource): description='Path({:s}) not found'.format(str(subpath)) ) + self._callback_dispatcher.dispatch( + '/restconf/data/' + subpath, old_data=old_data, new_data=None + ) + response = jsonify({}) response.status_code = HttpStatusCodesEnum.SUCCESS_NO_CONTENT.value return response diff --git a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py index 0b5faa497..f07eae2db 100644 --- a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py +++ b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py @@ -16,8 +16,9 @@ import json, logging, time from flask import Flask, request from flask_restful import Api +from .Callbacks import CallbackDispatcher from .Config import RESTCONF_PREFIX, SECRET_KEY, STARTUP_FILE, YANG_SEARCH_PATH -from .Dispatch import RestConfDispatch +from .DispatchData import RestConfDispatchData from .HostMeta import HostMeta from .YangHandler import YangHandler from .YangModelDiscoverer import YangModuleDiscoverer @@ -53,11 +54,16 @@ class RestConfServerApplication: YANG_SEARCH_PATH, self._yang_module_names, self._yang_startup_data ) + self._callback_dispatcher = CallbackDispatcher() + self._app = Flask(__name__) self._app.config['SECRET_KEY'] = SECRET_KEY self._app.after_request(log_request) self._api = Api(self._app) + @property + def callback_dispatcher(self): return self._callback_dispatcher + def get_startup_data(self) -> None: return self._yang_startup_data @@ -70,12 +76,10 @@ class RestConfServerApplication: def register_restconf(self) -> None: self._api.add_resource( - RestConfDispatch, - RESTCONF_PREFIX + '/data', - RESTCONF_PREFIX + '/data/', + RestConfDispatchData, RESTCONF_PREFIX + '/data/', RESTCONF_PREFIX + '/data//', - resource_class_args=(self._yang_handler,) + resource_class_args=(self._yang_handler, self._callback_dispatcher) ) def get_flask_app(self) -> Flask: -- GitLab From 1e34494575f31dfe4a7f18b8c2df2487ad54ae28 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:28:47 +0000 Subject: [PATCH 232/367] Tests - Tools - Mock NCE FAN Ctrl: - Added support for Data-change callbacks - Added redeploy script to test with SIMAP - Added NCE-FAN client for testing --- .../nce_fan_client/Requests.py | 54 +++++++++++++++ .../nce_fan_client/__init__.py | 14 ++++ .../nce_fan_client/__main__.py | 45 ++++++++++++ .../nce_fan_ctrl/Callbacks.py | 69 +++++++++++++++++++ .../mock_nce_fan_ctrl/nce_fan_ctrl/app.py | 7 ++ src/tests/tools/mock_nce_fan_ctrl/redeploy.sh | 36 ++++++++++ .../tools/mock_nce_fan_ctrl/run_client.sh | 19 +++++ 7 files changed, 244 insertions(+) create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__init__.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__main__.py create mode 100644 src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py create mode 100755 src/tests/tools/mock_nce_fan_ctrl/redeploy.sh create mode 100755 src/tests/tools/mock_nce_fan_ctrl/run_client.sh diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py new file mode 100644 index 000000000..7d6e8528d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py @@ -0,0 +1,54 @@ +QOS_PROFILE_NAME = 'AR_VR_Gaming' +URL_QOS_PROFILE_ITEM = 
'/huawei-nce-app-flow:qos-profiles/qos-profile={:s}'.format(QOS_PROFILE_NAME) +REQUEST_QOS_PROFILE = {"huawei-nce-app-flow:qos-profiles": {"qos-profile": [ + { + "downstream": { + "assure-bandwidth": "1000000000", + "max-bandwidth": "2000000000" + }, + "max-jitter": 10, + "max-latency": 10, + "max-loss": "0.001", + "name": QOS_PROFILE_NAME, + "upstream": { + "assure-bandwidth": "5000000000", + "max-bandwidth": "10000000000" + } + } +]}} + +APPLICATION_NAME = 'App_1_2_slice1' +URL_APPLICATION_ITEM = '/huawei-nce-app-flow:applications/application={:s}'.format(APPLICATION_NAME) +REQUEST_APPLICATION = {"huawei-nce-app-flow:applications": {"application": [ + { + "app-features": { + "app-feature": [ + { + "dest-ip": "172.1.101.22", + "dest-port": "10200", + "id": "feature_1_2_slice1", + "protocol": "tcp", + "src-ip": "172.16.204.221", + "src-port": "10500" + } + ] + }, + "app-id": ["app_1_2_slice1"], + "name": APPLICATION_NAME + } +]}} + +APP_FLOW_NAME = "App_Flow_1_2_slice1" +URL_APP_FLOW_ITEM = '/huawei-nce-app-flow:app-flows/app-flow={:s}'.format(APP_FLOW_NAME) +REQUEST_APP_FLOW = {"huawei-nce-app-flow:app-flows": {"app-flow": [ + { + "app-name": APPLICATION_NAME, + "duration": 9999, + "max-online-users": 1, + "name": APP_FLOW_NAME, + "qos-profile": QOS_PROFILE_NAME, + "service-profile": "service_1_2_slice1", + "stas": ["00:3D:E1:18:82:9E"], + "user-id": "ad2c2a94-3415-4676-867a-39eedfb9f205" + } +]}} diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__init__.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__main__.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__main__.py new file mode 100644 index 000000000..fd02f323d --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/__main__.py @@ -0,0 +1,45 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging +from common.tools.rest_conf.client.RestConfClient import RestConfClient +from .Requests import ( + URL_QOS_PROFILE_ITEM, REQUEST_QOS_PROFILE, + URL_APPLICATION_ITEM, REQUEST_APPLICATION, + URL_APP_FLOW_ITEM, REQUEST_APP_FLOW, +) + +logging.basicConfig(level=logging.INFO) +logging.getLogger('RestConfClient').setLevel(logging.DEBUG) +LOGGER = logging.getLogger(__name__) + +def main() -> None: + restconf_client = RestConfClient( + '172.17.0.1', port=8081, + logger=logging.getLogger('RestConfClient') + ) + + LOGGER.info('Creating QoS Profile: {:s}'.format(str(REQUEST_QOS_PROFILE))) + restconf_client.post(URL_QOS_PROFILE_ITEM, body=REQUEST_QOS_PROFILE) + + LOGGER.info('Creating Application: {:s}'.format(str(REQUEST_APPLICATION))) + restconf_client.post(URL_APPLICATION_ITEM, body=REQUEST_APPLICATION) + + LOGGER.info('Creating App Flow: {:s}'.format(str(REQUEST_APP_FLOW))) + restconf_client.post(URL_APP_FLOW_ITEM, body=REQUEST_APP_FLOW) + + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py new file mode 100644 index 000000000..5de08b9f7 --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py @@ -0,0 +1,69 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging, re +from typing import Dict, Optional +from common.tools.rest_conf.server.restconf_server.Callbacks import _Callback + + +LOGGER = logging.getLogger(__name__) + + +class CallbackQosProfile(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/huawei-nce-app-flow:qos-profiles' + pattern += r'/qos-profile=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + MSG = '[on_qos_profile] match={:s} path={:s} old_data={:s} new_data={:s}' + LOGGER.warning(MSG.format(str(match.groupdict()), str(path), str(old_data), str(new_data))) + return False + + +class CallbackApplication(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/huawei-nce-app-flow:applications' + pattern += r'/application=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + MSG = '[on_application] match={:s} path={:s} old_data={:s} new_data={:s}' + LOGGER.warning(MSG.format(str(match.groupdict()), str(path), str(old_data), str(new_data))) + return False + + +class CallbackAppFlow(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/huawei-nce-app-flow:app-flows' + pattern += r'/app-flow=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + MSG = '[on_app_flow] match={:s} path={:s} old_data={:s} new_data={:s}' + LOGGER.warning(MSG.format(str(match.groupdict()), str(path), str(old_data), str(new_data))) + return False diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py index 0b7648466..bf6e5c6a5 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/app.py @@ -18,6 +18,7 @@ import logging from common.tools.rest_conf.server.restconf_server.RestConfServerApplication import RestConfServerApplication +from .Callbacks import CallbackApplication, CallbackAppFlow, CallbackQosProfile from .SimapUpdater import SimapUpdater @@ -31,6 +32,7 @@ logging.getLogger('RestConfClient').setLevel(logging.WARN) LOGGER.info('Starting...') rcs_app = RestConfServerApplication() + rcs_app.register_host_meta() rcs_app.register_restconf() LOGGER.info('All connectors registered') @@ -43,6 +45,11 @@ if len(networks) == 1 and networks[0]['network-id'] == 'admin': simap_updater = SimapUpdater() simap_updater.upload_topology(networks[0]) + rcs_app.callback_dispatcher.register(CallbackApplication()) + rcs_app.callback_dispatcher.register(CallbackAppFlow()) + rcs_app.callback_dispatcher.register(CallbackQosProfile()) + LOGGER.info('All callbacks registered') + rcs_app.dump_configuration() app = rcs_app.get_flask_app() diff --git a/src/tests/tools/mock_nce_fan_ctrl/redeploy.sh b/src/tests/tools/mock_nce_fan_ctrl/redeploy.sh new file mode 100755 index 000000000..5be64707f --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/redeploy.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "Building SIMAP Server..." +cd ~/tfs-ctrl/ +docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . + +echo "Building NCE-FAN Controller..." +cd ~/tfs-ctrl/ +docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . + +echo "Cleaning up..." +docker rm --force simap-server +docker rm --force nce-fan-ctrl + +echo "Deploying support services..." +docker run --detach --name simap-server --publish 8080:8080 simap-server:mock +docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock + +sleep 2 +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/mock_nce_fan_ctrl/run_client.sh b/src/tests/tools/mock_nce_fan_ctrl/run_client.sh new file mode 100755 index 000000000..4384f0fdb --- /dev/null +++ b/src/tests/tools/mock_nce_fan_ctrl/run_client.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0)/../../../ + +python -m tests.tools.mock_nce_fan_ctrl.nce_fan_client -- GitLab From 4ab38947bdd8a4088f15c6515d9dffc205f6ad14 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 9 Sep 2025 18:44:48 +0000 Subject: [PATCH 233/367] Tests - Tools - Mock NCE T Ctrl: - Added support for Data-change callbacks - Added redeploy script to test with SIMAP - Added NCE-T client for testing --- .../mock_nce_t_ctrl/nce_t_client/Requests.py | 134 ++++++++++++++++++ .../mock_nce_t_ctrl/nce_t_client/__init__.py | 14 ++ .../mock_nce_t_ctrl/nce_t_client/__main__.py | 41 ++++++ .../mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py | 53 +++++++ .../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 6 + src/tests/tools/mock_nce_t_ctrl/redeploy.sh | 36 +++++ src/tests/tools/mock_nce_t_ctrl/run_client.sh | 19 +++ 7 files changed, 303 insertions(+) create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_client/__init__.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py create mode 100755 src/tests/tools/mock_nce_t_ctrl/redeploy.sh create mode 100755 src/tests/tools/mock_nce_t_ctrl/run_client.sh diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py new file mode 100644 index 000000000..c9d51f14c --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py @@ -0,0 +1,134 @@ +OSU_TUNNEL_NAME = 'osu_tunnel_1' +URL_OSU_TUNNEL_ITEM = '/ietf-te:te/tunnels/tunnel={:s}'.format(OSU_TUNNEL_NAME) +REQUEST_OSU_TUNNEL = {"ietf-te:te": {"tunnels": {"tunnel": [ + { + "name": OSU_TUNNEL_NAME, + "title": "OSU_TUNNEL_1", + "admin-state": "ietf-te-types:tunnel-admin-state-up", + "delay": 20, + "te-bandwidth": { + "layer": "odu", + "odu-type": "osuflex", + "number": 40 + }, + "bidirectional": True, + "destination-endpoints": { + "destination-endpoint": [ + { + "node-id": "10.0.30.1", + "tp-id": "200", + "ttp-channel-name": "och:1-odu2:1-oduflex:3-osuflex:1", + "protection-role": "work" + } + ] + }, + "source-endpoints": { + "source-endpoint": [ + { + "node-id": "10.0.10.1", + "tp-id": "200", + "ttp-channel-name": "och:1-odu2:1-oduflex:1-osuflex:2", + "protection-role": "work" + } + ] + }, + "restoration": { + "restoration-type": "ietf-te-types:lsp-restoration-not-applicable", + "restoration-lock": False + }, + "protection": { + "protection-type": "ietf-te-types:lsp-protection-unprotected", + "protection-reversion-disable": True + } + } +]}}} + +ETHT_SERVICE_NAME = 'etht_service_1' +URL_ETHT_SERVICE_ITEM = '/ietf-eth-tran-service:etht-svc/etht-svc-instances={:s}'.format(ETHT_SERVICE_NAME) +REQUEST_ETHT_SERVICE = {"ietf-eth-tran-service:etht-svc": {"etht-svc-instances": [ + { + "etht-svc-name": ETHT_SERVICE_NAME, + "etht-svc-title": "ETHT_SERVICE_1", + "etht-svc-type": "op-mp2mp-svc", + "source-endpoints": { + "source-endpoint": [ + { + "node-id": "10.0.10.1", + "tp-id": "200", + "protection-role": "work", + "layer-specific": { + "access-type": "port" + }, + "is-extendable": False, + "is-terminal": True, + "static-route-list": [ + { + "destination": "128.32.10.5", + "destination-mask": 24, + "next-hop": "128.32.33.5" + }, + { + "destination": "128.32.20.5", + "destination-mask": 24, + "next-hop": "128.32.33.5" + } + ], + "outer-tag": { + "tag-type": "ietf-eth-tran-types:classify-c-vlan", + 
"vlan-value": 21 + }, + "service-classification-type": "ietf-eth-tran-type:vlan-classification", + "ingress-egress-bandwidth-profile" : { + "bandwidth-profile-type": "ietf-eth-tran-types:mef-10-bwp", + "CIR": 10000000, + "EIR": 10000000 + } + } + ] + }, + "destination-endpoints": { + "destination-endpoint": [ + { + "node-id": "10.0.30.1", + "tp-id": "200", + "protection-role": "work", + "layer-specific": { + "access-type": "port" + }, + "is-extendable": False, + "is-terminal": True, + "static-route-list": [ + { + "destination": "172.1.101.22", + "destination-mask": 24, + "next-hop": "172.10.33.5" + } + ], + "outer-tag": { + "tag-type": "ietf-eth-tran-types:classify-c-vlan", + "vlan-value": 101 + }, + "service-classification-type": "ietf-eth-tran-type:vlan-classification", + "ingress-egress-bandwidth-profile" : { + "bandwidth-profile-type": "ietf-eth-tran-types:mef-10-bwp", + "CIR": 10000000, + "EIR": 10000000 + } + } + ] + }, + "svc-tunnel": [ + { + "tunnel-name": "OSU_TUNNEL_NAME" + } + ], + "optimizations": { + "optimization-metric": [ + { + "metric-role": "work", + "metric-type": "ietf-te-types:path-metric-te" + } + ] + } + } +]}} diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__init__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py new file mode 100644 index 000000000..ccda8f251 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py @@ -0,0 +1,41 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging +from common.tools.rest_conf.client.RestConfClient import RestConfClient +from .Requests import ( + URL_OSU_TUNNEL_ITEM, REQUEST_OSU_TUNNEL, + URL_ETHT_SERVICE_ITEM, REQUEST_ETHT_SERVICE, +) + +logging.basicConfig(level=logging.INFO) +logging.getLogger('RestConfClient').setLevel(logging.DEBUG) +LOGGER = logging.getLogger(__name__) + +def main() -> None: + restconf_client = RestConfClient( + '172.17.0.1', port=8081, + logger=logging.getLogger('RestConfClient') + ) + + LOGGER.info('Creating OSU Tunnel: {:s}'.format(str(REQUEST_OSU_TUNNEL))) + restconf_client.post(URL_OSU_TUNNEL_ITEM, body=REQUEST_OSU_TUNNEL) + + LOGGER.info('Creating ETH-T Service: {:s}'.format(str(REQUEST_ETHT_SERVICE))) + restconf_client.post(URL_ETHT_SERVICE_ITEM, body=REQUEST_ETHT_SERVICE) + + +if __name__ == '__main__': + main() diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py new file mode 100644 index 000000000..dc728c00c --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py @@ -0,0 +1,53 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, re +from typing import Dict, Optional +from common.tools.rest_conf.server.restconf_server.Callbacks import _Callback + + +LOGGER = logging.getLogger(__name__) + + +class CallbackOsuTunnel(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/ietf-te:te/tunnels' + pattern += r'/tunnel=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + MSG = '[on_osu_tunnel] match={:s} path={:s} old_data={:s} new_data={:s}' + LOGGER.warning(MSG.format(str(match.groupdict()), str(path), str(old_data), str(new_data))) + return False + + +class CallbackEthTService(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/data' + pattern += r'/ietf-eth-tran-service:etht-svc' + pattern += r'/etht-svc-instances=(?P[^/]+)' + super().__init__(pattern) + + def execute( + self, match : re.Match, path : str, old_data : Optional[Dict], + new_data : Optional[Dict] + ) -> bool: + MSG = '[on_etht_service] match={:s} path={:s} old_data={:s} new_data={:s}' + LOGGER.warning(MSG.format(str(match.groupdict()), str(path), str(old_data), str(new_data))) + return False diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 0b7648466..74de70ee8 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -18,6 +18,7 @@ import logging from common.tools.rest_conf.server.restconf_server.RestConfServerApplication import RestConfServerApplication +from .Callbacks import CallbackEthTService, CallbackOsuTunnel from .SimapUpdater import SimapUpdater @@ -31,6 +32,7 @@ logging.getLogger('RestConfClient').setLevel(logging.WARN) 
LOGGER.info('Starting...') rcs_app = RestConfServerApplication() + rcs_app.register_host_meta() rcs_app.register_restconf() LOGGER.info('All connectors registered') @@ -43,6 +45,10 @@ if len(networks) == 1 and networks[0]['network-id'] == 'admin': simap_updater = SimapUpdater() simap_updater.upload_topology(networks[0]) + rcs_app.callback_dispatcher.register(CallbackOsuTunnel()) + rcs_app.callback_dispatcher.register(CallbackEthTService()) + LOGGER.info('All callbacks registered') + rcs_app.dump_configuration() app = rcs_app.get_flask_app() diff --git a/src/tests/tools/mock_nce_t_ctrl/redeploy.sh b/src/tests/tools/mock_nce_t_ctrl/redeploy.sh new file mode 100755 index 000000000..ed5c25457 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/redeploy.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "Building SIMAP Server..." +cd ~/tfs-ctrl/ +docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . + +echo "Building NCE-T Controller..." +cd ~/tfs-ctrl/ +docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . + +echo "Cleaning up..." +docker rm --force simap-server +docker rm --force nce-t-ctrl + +echo "Deploying support services..." +docker run --detach --name simap-server --publish 8080:8080 simap-server:mock +docker run --detach --name nce-t-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock + +sleep 2 +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/mock_nce_t_ctrl/run_client.sh b/src/tests/tools/mock_nce_t_ctrl/run_client.sh new file mode 100755 index 000000000..fe5155b39 --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/run_client.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0)/../../../ + +python -m tests.tools.mock_nce_t_ctrl.nce_t_client -- GitLab From 3b38d3462fdec9cd98a6532481e3ce08f27e26eb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 17 Sep 2025 17:55:26 +0000 Subject: [PATCH 234/367] Common - Tools - Rest Conf - Added restconf version to RestConf client - Added method to register custom endpoints --- .../tools/rest_conf/client/RestConfClient.py | 3 +++ .../RestConfServerApplication.py | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/common/tools/rest_conf/client/RestConfClient.py b/src/common/tools/rest_conf/client/RestConfClient.py index b11aa73de..a44200c0f 100644 --- a/src/common/tools/rest_conf/client/RestConfClient.py +++ b/src/common/tools/rest_conf/client/RestConfClient.py @@ -24,6 +24,7 @@ class RestConfClient(RestApiClient): def __init__( self, address : str, port : int = 8080, scheme : str = 'http', username : Optional[str] = None, password : Optional[str] = None, + restconf_version : Optional[str] = None, timeout : int = 30, verify_certs : bool = True, allow_redirects : bool = True, logger : Optional[logging.Logger] = None ) -> None: @@ -34,6 +35,8 @@ class RestConfClient(RestApiClient): ) self._discover_base_url() + if restconf_version is not None: + self._base_url += '/{:s}'.format(restconf_version) def _discover_base_url(self) -> None: diff --git a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py index f07eae2db..677277fe3 100644 --- a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py +++ b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py @@ -14,8 +14,9 @@ import json, logging, time +from typing import Any, Dict, Tuple, Type from flask import Flask, request -from flask_restful import Api +from flask_restful import Api, Resource from .Callbacks import CallbackDispatcher from .Config import RESTCONF_PREFIX, SECRET_KEY, STARTUP_FILE, YANG_SEARCH_PATH from .DispatchData import RestConfDispatchData @@ -82,6 +83,20 @@ class RestConfServerApplication: resource_class_args=(self._yang_handler, self._callback_dispatcher) ) + def register_custom( + self, resource_class : Type[Resource], + *urls : str, add_prefix_to_urls : bool = True, + resource_class_args : Tuple[Any, ...] 
= tuple(), + resource_class_kwargs : Dict[str, Any] = dict() + ) -> None: + if add_prefix_to_urls: + urls = [RESTCONF_PREFIX + u for u in urls] + self._api.add_resource( + resource_class, *urls, + resource_class_args=resource_class_args, + resource_class_kwargs=resource_class_kwargs + ) + def get_flask_app(self) -> Flask: return self._app -- GitLab From 7e619115ce6d57c711284378c6d521b2d81d2f13 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 17 Sep 2025 17:57:31 +0000 Subject: [PATCH 235/367] Test - Tools - Mock NCE-T Ctrl - Client: set restconf version - Added mock endpoints for OSU Tunnels and EthT Services - Added creation/removal of SIMAP trans-otn - Added missing YANG data model for OTN Tunnel --- .../mock_nce_t_ctrl/nce_t_client/__main__.py | 2 +- .../nce_t_ctrl/ResourceEthServices.py | 80 ++ .../nce_t_ctrl/ResourceOsuTunnels.py | 85 ++ .../nce_t_ctrl/SimapUpdater.py | 54 + .../tools/mock_nce_t_ctrl/nce_t_ctrl/app.py | 42 +- .../ietf-otn-tunnel.yang | 1022 +++++++++++++++++ 6 files changed, 1282 insertions(+), 3 deletions(-) create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceEthServices.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py create mode 100644 src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-tunnel-model-23/ietf-otn-tunnel.yang diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py index ccda8f251..c4dc92bc5 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/__main__.py @@ -26,7 +26,7 @@ LOGGER = logging.getLogger(__name__) def main() -> None: restconf_client = RestConfClient( - '172.17.0.1', port=8081, + '172.17.0.1', port=8081, restconf_version='v2', logger=logging.getLogger('RestConfClient') ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceEthServices.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceEthServices.py new file mode 100644 index 000000000..b7d41f41a --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceEthServices.py @@ -0,0 +1,80 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# REST-API resource implementing minimal support for "IETF YANG Data Model for Transport Network Client Signals". 
+# Ref: https://www.ietf.org/archive/id/draft-ietf-ccamp-client-signal-yang-10.html + +from flask import abort, jsonify, make_response, request +from flask_restful import Resource +from .SimapUpdater import SimapUpdater + +ETHT_SERVICES = {} + +class EthServices(Resource): + def __init__(self, simap_updater : SimapUpdater): + super().__init__() + self._simap_updater = simap_updater + + def get(self): + etht_services = [etht_service for etht_service in ETHT_SERVICES.values()] + data = {'ietf-eth-tran-service:etht-svc': {'etht-svc-instances': etht_services}} + return make_response(jsonify(data), 200) + + def post(self): + json_request = request.get_json() + if not json_request: abort(400) + if not isinstance(json_request, dict): abort(400) + if 'ietf-eth-tran-service:etht-svc' not in json_request: abort(400) + json_request = json_request['ietf-eth-tran-service:etht-svc'] + if 'etht-svc-instances' not in json_request: abort(400) + etht_services = json_request['etht-svc-instances'] + if not isinstance(etht_services, list): abort(400) + if len(etht_services) != 1: abort(400) + etht_service = etht_services[0] + etht_service_name = etht_service['etht-svc-name'] + ETHT_SERVICES[etht_service_name] = etht_service + self._simap_updater.create_simap_trans_otn(etht_service) + return make_response(jsonify({}), 201) + +class EthService(Resource): + def __init__(self, simap_updater : SimapUpdater): + super().__init__() + self._simap_updater = simap_updater + + def get(self, etht_service_name : str): + etht_service = ETHT_SERVICES.get(etht_service_name, None) + data,status = ({}, 404) if etht_service is None else (etht_service, 200) + return make_response(jsonify(data), status) + + def post(self, etht_service_name : str): + json_request = request.get_json() + if not json_request: abort(400) + if not isinstance(json_request, dict): abort(400) + if 'ietf-eth-tran-service:etht-svc' not in json_request: abort(400) + json_request = json_request['ietf-eth-tran-service:etht-svc'] + if 'etht-svc-instances' not in json_request: abort(400) + etht_services = json_request['etht-svc-instances'] + if not isinstance(etht_services, list): abort(400) + if len(etht_services) != 1: abort(400) + etht_service = etht_services[0] + assert etht_service_name == etht_service['etht-svc-name'] + ETHT_SERVICES[etht_service_name] = etht_service + self._simap_updater.create_simap_trans_otn(etht_service) + return make_response(jsonify({}), 201) + + def delete(self, etht_service_name : str): + etht_service = ETHT_SERVICES.pop(etht_service_name, None) + data,status = ({}, 404) if etht_service is None else (etht_service, 204) + self._simap_updater.delete_simap_trans_otn(etht_service_name) + return make_response(jsonify(data), status) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py new file mode 100644 index 000000000..7fe14208f --- /dev/null +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py @@ -0,0 +1,85 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# REST-API resource implementing minimal support for "IETF YANG Data Model for Traffic Engineering Tunnels, +# Label Switched Paths and Interfaces". +# Ref: https://www.ietf.org/archive/id/draft-ietf-teas-yang-te-34.html + + +from flask import abort, jsonify, make_response, request +from flask_restful import Resource +from .SimapUpdater import SimapUpdater + +OSU_TUNNELS = {} + +class OsuTunnels(Resource): + def __init__(self, simap_updater : SimapUpdater): + super().__init__() + self._simap_updater = simap_updater + + def get(self): + osu_tunnels = [osu_tunnel for osu_tunnel in OSU_TUNNELS.values()] + data = {'ietf-te:te': {'tunnels': {'tunnel': osu_tunnels}}} + return make_response(jsonify(data), 200) + + def post(self): + json_request = request.get_json() + if not json_request: abort(400) + if not isinstance(json_request, dict): abort(400) + if 'ietf-te:te' not in json_request: abort(400) + te_data = json_request['ietf-te:te'] + if not isinstance(te_data, dict): abort(400) + if 'tunnels' not in te_data: abort(400) + te_tunnels = te_data['tunnels'] + if 'tunnel' not in te_tunnels: abort(400) + osu_tunnels = te_tunnels['tunnel'] + if not isinstance(osu_tunnels, list): abort(400) + if len(osu_tunnels) != 1: abort(400) + osu_tunnel = osu_tunnels[0] + osu_tunnel_name = osu_tunnel['name'] + OSU_TUNNELS[osu_tunnel_name] = osu_tunnel + return make_response(jsonify({}), 201) + +class OsuTunnel(Resource): + def __init__(self, simap_updater : SimapUpdater): + super().__init__() + self._simap_updater = simap_updater + + def get(self, osu_tunnel_name : str): + osu_tunnel = OSU_TUNNELS.get(osu_tunnel_name, None) + data,status = ({}, 404) if osu_tunnel is None else (osu_tunnel, 200) + return make_response(jsonify(data), status) + + def post(self, osu_tunnel_name : str): + json_request = request.get_json() + if not json_request: abort(400) + if not isinstance(json_request, dict): abort(400) + if 'ietf-te:te' not in json_request: abort(400) + te_data = json_request['ietf-te:te'] + if not isinstance(te_data, dict): abort(400) + if 'tunnels' not in te_data: abort(400) + te_tunnels = te_data['tunnels'] + if 'tunnel' not in te_tunnels: abort(400) + osu_tunnels = te_tunnels['tunnel'] + if not isinstance(osu_tunnels, list): abort(400) + if len(osu_tunnels) != 1: abort(400) + osu_tunnel = osu_tunnels[0] + assert osu_tunnel_name == osu_tunnel['name'] + OSU_TUNNELS[osu_tunnel_name] = osu_tunnel + return make_response(jsonify({}), 201) + + def delete(self, osu_tunnel_name : str): + osu_tunnel = OSU_TUNNELS.pop(osu_tunnel_name, None) + data,status = ({}, 404) if osu_tunnel is None else (osu_tunnel, 204) + return make_response(jsonify(data), status) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py index cce0179c2..9f8e312c2 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py @@ -66,3 +66,57 @@ class SimapUpdater: te_topo.link(link_id).create( link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id ) + + + def create_simap_trans_otn(self, etht_service : Dict) -> None: + #etht_svc_name = etht_service['etht-svc-name'] + + #src_node_ep = etht_service['source-endpoints']['source-endpoint'][0] + #src_node_id = src_node_ep['node-id'] + #src_tp_id = src_node_ep['tp-id'] + + #dst_node_ep = 
etht_service['destination-endpoints']['destination-endpoint'][0] + #dst_node_id = dst_node_ep['node-id'] + #dst_tp_id = dst_node_ep['tp-id'] + + simap = self._simap_client.network('trans-otn') + simap.update(supporting_network_ids=['admin']) + + node_a = simap.node('site1') + node_a.update(supporting_node_ids=[('admin', 'O-PE1')]) + node_a.termination_point('200').update(supporting_termination_point_ids=[('admin', 'O-PE1', '200')]) + node_a.termination_point('500').update(supporting_termination_point_ids=[('admin', 'O-PE1', '500')]) + node_a.termination_point('501').update(supporting_termination_point_ids=[('admin', 'O-PE1', '501')]) + + node_b = simap.node('site2') + node_b.update(supporting_node_ids=[('admin', 'O-PE2')]) + node_b.termination_point('200').update(supporting_termination_point_ids=[('admin', 'O-PE2', '200')]) + node_b.termination_point('500').update(supporting_termination_point_ids=[('admin', 'O-PE2', '500')]) + node_b.termination_point('501').update(supporting_termination_point_ids=[('admin', 'O-PE2', '501')]) + + link_ab = simap.link('Trans-L1ab') + link_ab.update( + 'site1', '500', 'site2', '500', + supporting_link_ids=[ + ('admin', 'L7ab'), ('admin', 'L11ab'), + ] + ) + + link_ba = simap.link('Trans-L1ba') + link_ba.update( + 'site2', '500', 'site1', '500', + supporting_link_ids=[ + ('admin', 'L11ba'), ('admin', 'L7ba'), + ] + ) + + + def delete_simap_trans_otn(self, etht_svc_name : str) -> None: + simap = self._simap_client.network('trans-otn') + simap.update(supporting_network_ids=['admin']) + + link_ab = simap.link('Trans-L1ab') + link_ab.delete() + + link_ba = simap.link('Trans-L1ba') + link_ba.delete() diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py index 74de70ee8..601350454 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/app.py @@ -15,10 +15,21 @@ # This file overwrites default RestConf Server `app.py` file. +# Mock IETF ACTN SDN controller +# ----------------------------- +# REST server implementing minimal support for: +# - IETF YANG Data Model for Transport Network Client Signals +# Ref: https://www.ietf.org/archive/id/draft-ietf-ccamp-client-signal-yang-10.html +# - IETF YANG Data Model for Traffic Engineering Tunnels, Label Switched Paths and Interfaces +# Ref: https://www.ietf.org/archive/id/draft-ietf-teas-yang-te-34.html + +# NOTE: we need here OSUflex tunnels that are still not standardized; hardcoded. 
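A minimal editorial sketch (not taken from the patch) of how the custom endpoints registered below can be exercised directly over HTTP, assuming the containers started by redeploy.sh are running (the mock NCE-T published on 172.17.0.1:8081, with the RESTCONF v2 base path) and that the third-party `requests` package is acceptable for ad-hoc testing; the service name used is purely illustrative:

    import requests

    BASE = 'http://172.17.0.1:8081/restconf/v2/data'

    # List the OSU tunnels currently known by the mock (served by the OsuTunnels resource).
    resp = requests.get(BASE + '/ietf-te:te/tunnels', timeout=10)
    print(resp.status_code, resp.json())

    # Create an EthT service instance; the mock stores it and calls
    # SimapUpdater.create_simap_trans_otn() to update the SIMAP server.
    body = {'ietf-eth-tran-service:etht-svc': {'etht-svc-instances': [
        {'etht-svc-name': 'example-svc'}
    ]}}
    resp = requests.post(BASE + '/ietf-eth-tran-service:etht-svc', json=body, timeout=10)
    print(resp.status_code)  # 201 expected on success
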
 import logging
 from common.tools.rest_conf.server.restconf_server.RestConfServerApplication import RestConfServerApplication
 from .Callbacks import CallbackEthTService, CallbackOsuTunnel
+from .ResourceEthServices import EthService, EthServices
+from .ResourceOsuTunnels import OsuTunnel, OsuTunnels
 from .SimapUpdater import SimapUpdater
@@ -31,10 +42,38 @@
 logging.getLogger('RestConfClient').setLevel(logging.WARN)
 LOGGER.info('Starting...')
-rcs_app = RestConfServerApplication()
+simap_updater = SimapUpdater()
+
+rcs_app = RestConfServerApplication()
 rcs_app.register_host_meta()
 rcs_app.register_restconf()
+
+rcs_app.register_custom(
+    OsuTunnels,
+    '/restconf/v2/data/ietf-te:te/tunnels',
+    add_prefix_to_urls=False,
+    resource_class_args=(simap_updater,)
+)
+rcs_app.register_custom(
+    OsuTunnel,
+    '/restconf/v2/data/ietf-te:te/tunnels/tunnel=<string:osu_tunnel_name>',
+    add_prefix_to_urls=False,
+    resource_class_args=(simap_updater,)
+)
+rcs_app.register_custom(
+    EthServices,
+    '/restconf/v2/data/ietf-eth-tran-service:etht-svc',
+    add_prefix_to_urls=False,
+    resource_class_args=(simap_updater,)
+)
+rcs_app.register_custom(
+    EthService,
+    '/restconf/v2/data/ietf-eth-tran-service:etht-svc/etht-svc-instances=<string:etht_service_name>',
+    add_prefix_to_urls=False,
+    resource_class_args=(simap_updater,)
+)
+
 LOGGER.info('All connectors registered')
 startup_data = rcs_app.get_startup_data()
@@ -42,7 +81,6 @@ startup_data = rcs_app.get_startup_data()
 networks = startup_data.get('ietf-network:networks', dict())
 networks = networks.get('network', list())
 if len(networks) == 1 and networks[0]['network-id'] == 'admin':
-    simap_updater = SimapUpdater()
     simap_updater.upload_topology(networks[0])
     rcs_app.callback_dispatcher.register(CallbackOsuTunnel())
diff --git a/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-tunnel-model-23/ietf-otn-tunnel.yang b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-tunnel-model-23/ietf-otn-tunnel.yang
new file mode 100644
index 000000000..aa9e1d5e5
--- /dev/null
+++ b/src/tests/tools/mock_nce_t_ctrl/yang/draft-ietf-ccamp-otn-tunnel-model-23/ietf-otn-tunnel.yang
@@ -0,0 +1,1022 @@
+module ietf-otn-tunnel {
+  yang-version 1.1;
+  namespace "urn:ietf:params:xml:ns:yang:ietf-otn-tunnel";
+  prefix "otn-tnl";
+
+  import ietf-te {
+    prefix "te";
+    reference
+      "RFC ZZZZ: A YANG Data Model for Traffic Engineering Tunnels
+       and Interfaces.";
+  }
+  /* Note: The RFC Editor will replace ZZZZ with the number assigned
+     to the RFC once draft-ietf-teas-yang-te becomes an RFC.*/
+
+  import ietf-layer1-types {
+    prefix "l1-types";
+    reference
+      "RFC YYYY: Common YANG Data Types for Layer 1 Networks.";
+  }
+  /* Note: The RFC Editor will replace YYYY with the number assigned
+     to the RFC once draft-ietf-ccamp-layer1-types becomes an RFC.*/
+
+  organization
+    "IETF CCAMP Working Group";
+  contact
+    "WG Web:
+     WG List:
+
+     Editor: Haomian Zheng
+
+
+     Editor: Italo Busi
+
+
+     Editor: Sergio Belotti
+
+
+     Editor: Victor Lopez
+
+
+     Editor: Yunbin Xu
+    ";
+
+  description
+    "This module defines a model for OTN Tunnel Services.
+
+     The model fully conforms to the Network Management
+     Datastore Architecture (NMDA).
+
+     Copyright (c) 2024 IETF Trust and the persons
+     identified as authors of the code. All rights reserved.
+ + Redistribution and use in source and binary forms, with or + without modification, is permitted pursuant to, and subject + to the license terms contained in, the Revised BSD License + set forth in Section 4.c of the IETF Trust's Legal Provisions + Relating to IETF Documents + (https://trustee.ietf.org/license-info). + + This version of this YANG module is part of RFC XXXX; see + the RFC itself for full legal notices."; + + revision "2024-03-21" { + description + "Initial version"; + reference + "RFC XXXX: A YANG Data Model for Optical Transport Network + (OTN) Tunnels and Label Switched Paths"; + // RFC Ed.: replace XXXX with actual RFC number, update date + // information and remove this note + } + + /* + * Data nodes + */ + + /* + * Augment TE bandwidth + */ + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the named path constraint."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the tunnel."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the primary path."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:computed-paths-properties/" + + "te:computed-path-properties/te:path-properties/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of primary path's computed path + properties."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the primary reverse path."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:computed-paths-properties/" + + "te:computed-path-properties/te:path-properties/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the primary reverse path's computed + path properties."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the secondary path."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:computed-paths-properties/" + + "te:computed-path-properties/te:path-properties/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the secondary path's computed path + properties."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/" + + "te:secondary-reverse-path/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the secondary reverse path."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/" + + "te:secondary-reverse-path/" + + "te:computed-paths-properties/" + + 
"te:computed-path-properties/te:path-properties/" + + "te:te-bandwidth/te:technology" { + description + "Augment TE bandwidth of the secondary reverse path's computed + path properties."; + case otn { + uses l1-types:otn-path-bandwidth; + } + } + + /* + * Augment TE label range information + */ + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-in-segment/" + + "te:label-restrictions/te:label-restriction" { + description + "Augment TE label range information for the ingress segment + of the named path constraint."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-out-segment/" + + "te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the egress segment + of the named path constraint."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the ingress segment + of the primay path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the egress segment + of the primay path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the ingress segment + of the primay reverse path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the egress segment + of the primay reverse path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the ingress segment + of the secondary path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the egress segment + of the secondary path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the ingress segment + of the secondary reverse path."; + uses l1-types:otn-label-range-info; + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction" { + description + "Augment TE label range information for the egress segment + of the secondary reverse path."; + uses l1-types:otn-label-range-info; + } + + /* + * Augment TE label. 
+ */ + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/" + + "te:explicit-route-objects/" + + "te:route-object-exclude-always/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects always + excluded by the path computation with the named path + constraint."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/" + + "te:explicit-route-objects/" + + "te:route-object-include-exclude/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects included + or excluded by the path computation with the named path + constraint."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-in-segment/" + + "te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the ingress segment + of the named path constraint."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-in-segment/" + + "te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the ingress segment + of the named path constraint."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-in-segment/" + + "te:label-restrictions/te:label-restriction/" + + "te:label-step/te:technology" { + description + "Augment TE label range step for the ingress segment + of the named path constraint."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-out-segment/" + + "te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the egress segment + of the named path constraint."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-out-segment/" + + "te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the egress segment + of the named path constraint."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:globals/te:named-path-constraints/" + + "te:named-path-constraint/te:path-out-segment/" + + "te:label-restrictions/te:label-restriction/" + + "te:label-step/te:technology" { + description + "Augment TE label range step for the egress segment + of the named path constraint."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-exclude-objects/" + + "te:route-object-exclude-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects excluded by the path computation of the primary + path."; + case otn { + uses 
l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-include-objects/" + + "te:route-object-include-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects included by the path computation of the primary + path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:explicit-route-objects/" + + "te:route-object-exclude-always/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects always + excluded by the path computation of the primary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:explicit-route-objects/" + + "te:route-object-include-exclude/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects included + or excluded by the path computation of the primary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the ingress segment + of the primay path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the ingress segment + of the primay path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the ingress segment + of the primay path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the egress segment + of the primay path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the egress segment + of the primay path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range end for the egress segment + of the primay path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + 
"te:computed-paths-properties/" + + "te:computed-path-properties/te:path-properties/" + + "te:path-route-objects/te:path-route-object/" + + "te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the route object of the computed + primary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-exclude-objects/" + + "te:route-object-exclude-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects excluded by the path computation of the primary + reverse path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-include-objects/" + + "te:route-object-include-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects included by the path computation of the primary + reverse path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:explicit-route-objects/" + + "te:route-object-exclude-always/" + + "te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects always + excluded by the path computation of the primary reverse + path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:explicit-route-objects/" + + "te:route-object-include-exclude/" + + "te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects included + or excluded by the path computation of the primary reverse + path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the ingress segment + of the primay reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the ingress segment + of the primay reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the ingress segment + of the primay reverse path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + 
"te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the egress segment + of the primay reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the egress segment + of the primay reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the egress segment + of the primay reverse path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:primary-paths/te:primary-path/" + + "te:primary-reverse-path/" + + "te:computed-paths-properties/te:computed-path-properties/" + + "te:path-properties/te:path-route-objects/" + + "te:path-route-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the route object of the computed + primary reverse path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-exclude-objects/" + + "te:route-object-exclude-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects excluded by the path computation of the + secondary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-include-objects/" + + "te:route-object-include-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects included by the path computation of the + secondary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:explicit-route-objects/" + + "te:route-object-exclude-always/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects always + excluded by the path computation of the secondary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:explicit-route-objects/" + + "te:route-object-include-exclude/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects included + or excluded by the path computation of the secondary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + 
"te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the ingress segment + of the secondary path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the ingress segment + of the secondary path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the ingress segment + of the secondary path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the egress segment + of the secondary path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the egress segment + of the secondary path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the egress segment + of the secondary path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-paths/te:secondary-path/" + + "te:computed-paths-properties/" + + "te:computed-path-properties/" + + "te:path-properties/te:path-route-objects/" + + "te:path-route-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the route object of the computed + secondary path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-exclude-objects/" + + "te:route-object-exclude-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects excluded by the path computation of the + secondary reverse path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:optimizations/te:algorithm/te:metric/" + + "te:optimization-metric/te:explicit-route-include-objects/" + + "te:route-object-include-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the optimization of the explicit + route objects included by the path computation of the + secondary reverse path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + 
augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:explicit-route-objects/" + + "te:route-object-exclude-always/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects always + excluded by the path computation of the secondary reverse + path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:explicit-route-objects/" + + "te:route-object-include-exclude/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the explicit route objects included + or excluded by the path computation of the secondary reverse + path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the ingress segment + of the secondary reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the ingress segment + of the secondary reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-in-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the ingress segment + of the secondary reverse path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-start/" + + "te:te-label/te:technology" { + description + "Augment TE label range start for the egress segment + of the secondary reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-end/" + + "te:te-label/te:technology" { + description + "Augment TE label range end for the egress segment + of the secondary reverse path."; + case otn { + uses l1-types:otn-label-start-end; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:path-out-segment/te:label-restrictions/" + + "te:label-restriction/te:label-step/te:technology" { + description + "Augment TE label range step for the egress segment + of the secondary reverse path."; + case otn { + uses l1-types:otn-label-step; + } + } + + augment "/te:te/te:tunnels/te:tunnel/" + + "te:secondary-reverse-paths/te:secondary-reverse-path/" + + "te:computed-paths-properties/" + + "te:computed-path-properties/" + + "te:path-properties/te:path-route-objects/" + + "te:path-route-object/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the route object of the computed + 
secondary reverse path."; + case otn { + uses l1-types:otn-label-hop; + } + } + + augment "/te:te/te:lsps/" + + "te:lsp/te:lsp-actual-route-information/" + + "te:lsp-actual-route-information/te:type/te:label/" + + "te:label-hop/te:te-label/te:technology" { + description + "Augment TE label hop for the record route of the LSP."; + case otn { + uses l1-types:otn-label-hop; + } + } +} \ No newline at end of file -- GitLab From c85fce1aa612e49c686caeeb140d3023e6be9388 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 17 Sep 2025 18:16:41 +0000 Subject: [PATCH 236/367] Device component - IETF ACTN / NCE drivers: - Fixed device type inference --- .../drivers/ietf_actn/handlers/NetworkTopologyHandler.py | 2 +- .../service/drivers/nce/handlers/NetworkTopologyHandler.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index ee7efdf7a..691d4b01e 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -80,7 +80,7 @@ class NetworkTopologyHandler: if 'ietf-te-topology:te-topology' in nnt: nnt_tet = nnt['ietf-te-topology:te-topology'] if 'ietf-otn-topology:otn-topology' in nnt_tet: - device_type = DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value + device_type = DeviceTypeEnum.OPTICAL_FGOTN.value endpoint_type = 'optical' elif 'ietf-eth-te-topology:eth-tran-topology' in nnt_tet: device_type = DeviceTypeEnum.EMULATED_PACKET_SWITCH.value diff --git a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py index 15c87a19d..6e8d2e555 100644 --- a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py @@ -80,7 +80,7 @@ class NetworkTopologyHandler: if 'ietf-te-topology:te-topology' in nnt: nnt_tet = nnt['ietf-te-topology:te-topology'] if 'ietf-otn-topology:otn-topology' in nnt_tet: - device_type = DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value + device_type = DeviceTypeEnum.OPTICAL_OLT.value endpoint_type = 'optical' elif 'ietf-eth-te-topology:eth-tran-topology' in nnt_tet: device_type = DeviceTypeEnum.EMULATED_PACKET_SWITCH.value @@ -105,6 +105,11 @@ class NetworkTopologyHandler: if 'name' in ntea: node_name = ntea['name'] + if 'OLT' in node_id: + device_type = DeviceTypeEnum.OPTICAL_OLT.value + elif 'ONT' in node_id: + device_type = DeviceTypeEnum.OPTICAL_ONT.value + device_url = '/devices/device[{:s}]'.format(node_id) device_data = { 'uuid': node_id, -- GitLab From 2b6dc28825e99c4d74c9a48e9121dd00c2efd6b6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 17 Sep 2025 18:17:24 +0000 Subject: [PATCH 237/367] ECOC F5GA Telemetry Demo: - Updated device types in descriptors --- .../ecoc25-f5ga-telemetry/data/topology/topology-agg.json | 4 ++-- .../ecoc25-f5ga-telemetry/data/topology/topology-ip.json | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index eace7a399..7231696d6 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -27,7 +27,7 @@ "timeout": 120, "verify_certs": false, "import_topology": "topology" }}} ]}}, - {"device_id": 
{"device_uuid": {"uuid": "POP1"}}, "device_type": "emu-packet-router", + {"device_id": {"device_uuid": {"uuid": "POP1"}}, "device_type": "packet-pop", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.220"}}, @@ -54,7 +54,7 @@ "site_location": "transport", "mtu": "1500" }}} ]}}, - {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "emu-packet-router", + {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "packet-pop", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.221"}}, diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json index cf91710a3..d4b72d428 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json @@ -8,7 +8,7 @@ ], "devices": [ { - "device_id": {"device_uuid": {"uuid": "P-PE1"}}, "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-PE1"}}, "device_type": "packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.122.25"}}, @@ -37,7 +37,7 @@ ]} }, { - "device_id": {"device_uuid": {"uuid": "P-P1"}}, "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-P1"}}, "device_type": "packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.31"}}, @@ -60,7 +60,7 @@ ]} }, { - "device_id": {"device_uuid": {"uuid": "P-P2"}}, "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-P2"}}, "device_type": "packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.33"}}, @@ -83,7 +83,7 @@ ]} }, { - "device_id": {"device_uuid": {"uuid": "P-PE2"}}, "device_type": "emu-packet-router", + "device_id": {"device_uuid": {"uuid": "P-PE2"}}, "device_type": "packet-router", "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.32"}}, -- GitLab From 17d4d12368c47867c1fad50c4e2dbab33a1eca71 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Wed, 17 Sep 2025 18:45:05 +0000 Subject: [PATCH 238/367] Device component: - Fix driver selection logic --- src/device/service/drivers/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 395452bde..40bb9176b 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -41,7 +41,8 @@ DRIVERS.append( #DeviceTypeEnum.OPTICAL_ROADM, #DeviceTypeEnum.OPTICAL_TRANSPONDER, #DeviceTypeEnum.P4_SWITCH, - #DeviceTypeEnum.PACKET_ROUTER, + DeviceTypeEnum.PACKET_ROUTER, + DeviceTypeEnum.PACKET_POP, #DeviceTypeEnum.PACKET_SWITCH, ], FilterFieldEnum.DRIVER: [ -- GitLab From 250b1064ae9442df948371c4f47cc17570952603 Mon Sep 17 
00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 08:43:08 +0000 Subject: [PATCH 239/367] Device component: - Improved logging in DriverFactory --- src/device/service/driver_api/DriverFactory.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/device/service/driver_api/DriverFactory.py b/src/device/service/driver_api/DriverFactory.py index 72583069b..8f3d90cdb 100644 --- a/src/device/service/driver_api/DriverFactory.py +++ b/src/device/service/driver_api/DriverFactory.py @@ -25,7 +25,8 @@ LOGGER = logging.getLogger(__name__) class DriverFactory: def __init__(self, drivers : List[Tuple[type, List[Dict[FilterFieldEnum, Any]]]]) -> None: - self.__indices : Dict[str, Dict[str, Set[_Driver]]] = {} # Dict{field_name => Dict{field_value => Set{Driver}}} + # Dict{field_name => Dict{field_value => Set{Driver}}} + self.__indices : Dict[str, Dict[str, Set[_Driver]]] = dict() for driver_class,filter_field_sets in drivers: for filter_fields in filter_field_sets: @@ -85,4 +86,8 @@ class DriverFactory: if len(candidate_driver_classes) == 0: raise UnsatisfiedFilterException(filter_fields) candidate_driver_classes = sorted(candidate_driver_classes.items(), key=operator.itemgetter(1), reverse=True) + + MSG = '[get_driver_class] candidate_driver_classes={:s}' + LOGGER.debug(MSG.format(str(candidate_driver_classes))) + return candidate_driver_classes[0][0] -- GitLab From fe7a440c53528a0306f128185071e58d5065fc44 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 12:27:35 +0000 Subject: [PATCH 240/367] Device component: - Fixed Driver selection logic. Now selection is strict (all filter fields from device should match those from driver, except those the driver explicitly keeps open) --- .../service/driver_api/DriverFactory.py | 163 +++++++++++------- src/device/service/driver_api/Exceptions.py | 15 ++ src/device/service/drivers/__init__.py | 136 ++++++--------- 3 files changed, 162 insertions(+), 152 deletions(-) diff --git a/src/device/service/driver_api/DriverFactory.py b/src/device/service/driver_api/DriverFactory.py index 8f3d90cdb..38ae0ac56 100644 --- a/src/device/service/driver_api/DriverFactory.py +++ b/src/device/service/driver_api/DriverFactory.py @@ -12,82 +12,111 @@ # See the License for the specific language governing permissions and # limitations under the License. 
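As a rough editorial illustration of the strict selection rule described in the commit message above (for every field a driver restricts, the device's requested filter values must be a subset of the values the driver declares; fields the driver leaves empty remain open), a standalone sketch with purely illustrative field names and values, not taken from the patch:

    # Simplified re-statement of the subset check performed by is_driver_compatible() below.
    def compatible(driver_filter_fields, selection_filter_fields):
        for field, driver_values in driver_filter_fields.items():
            if not driver_values: continue   # empty/None means the driver keeps the field open
            selection_values = set(selection_filter_fields.get(field, set()))
            if not selection_values.issubset(set(driver_values)): return False
        return True

    driver_ff = {'device_type': {'packet-router', 'packet-pop'}}       # declared by a driver
    print(compatible(driver_ff, {'device_type': {'packet-router'}}))   # True: requested values are a subset
    print(compatible(driver_ff, {'device_type': {'optical-olt'}}))     # False: value not declared by the driver
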
-import logging, operator +import logging from enum import Enum -from typing import Any, Dict, Iterable, List, Set, Tuple +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type from ._Driver import _Driver from .Exceptions import ( - UnsatisfiedFilterException, UnsupportedDriverClassException, UnsupportedFilterFieldException, - UnsupportedFilterFieldValueException) + AmbiguousFilterException, EmptyFilterFieldException, + UnsatisfiedFilterException, UnsupportedDriverClassException, + UnsupportedFilterFieldException, UnsupportedFilterFieldValueException +) from .FilterFields import FILTER_FIELD_ALLOWED_VALUES, FilterFieldEnum + LOGGER = logging.getLogger(__name__) +SUPPORTED_FILTER_FIELDS = set(FILTER_FIELD_ALLOWED_VALUES.keys()) + + +def sanitize_filter_fields( + filter_fields : Dict[FilterFieldEnum, Any], driver_name : Optional[str] = None +) -> Dict[FilterFieldEnum, Any]: + if len(filter_fields) == 0: + raise EmptyFilterFieldException( + filter_fields, driver_class_name=driver_name + ) + + unsupported_filter_fields = set(filter_fields.keys()).difference(SUPPORTED_FILTER_FIELDS) + if len(unsupported_filter_fields) > 0: + raise UnsupportedFilterFieldException( + unsupported_filter_fields, driver_class_name=driver_name + ) + + sanitized_filter_fields : Dict[FilterFieldEnum, Set[Any]] = dict() + for field_name, field_values in filter_fields.items(): + field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) + if not isinstance(field_values, Iterable) or isinstance(field_values, str): + field_values = [field_values] + + sanitized_field_values : Set[Any] = set() + for field_value in field_values: + if isinstance(field_value, Enum): field_value = field_value.value + if field_enum_values is not None and field_value not in field_enum_values: + raise UnsupportedFilterFieldValueException( + field_name, field_value, field_enum_values, + driver_class_name=driver_name + ) + sanitized_field_values.add(field_value) + + if len(sanitized_field_values) == 0: continue # do not add empty filters + sanitized_filter_fields[field_name] = sanitized_field_values + + return sanitized_filter_fields + + class DriverFactory: - def __init__(self, drivers : List[Tuple[type, List[Dict[FilterFieldEnum, Any]]]]) -> None: - # Dict{field_name => Dict{field_value => Set{Driver}}} - self.__indices : Dict[str, Dict[str, Set[_Driver]]] = dict() + def __init__( + self, drivers : List[Tuple[Type[_Driver], List[Dict[FilterFieldEnum, Any]]]] + ) -> None: + self.__drivers : List[Tuple[Type[_Driver], Dict[FilterFieldEnum, Any]]] = list() for driver_class,filter_field_sets in drivers: + #if not issubclass(driver_class, _Driver): + # raise UnsupportedDriverClassException(str(driver_class)) + driver_name = driver_class #.__name__ + for filter_fields in filter_field_sets: filter_fields = {k.value:v for k,v in filter_fields.items()} - self.register_driver_class(driver_class, **filter_fields) - - def register_driver_class(self, driver_class, **filter_fields): - if not issubclass(driver_class, _Driver): raise UnsupportedDriverClassException(str(driver_class)) - - driver_name = driver_class.__name__ - supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys()) - unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields) - if len(unsupported_filter_fields) > 0: - raise UnsupportedFilterFieldException(unsupported_filter_fields, driver_class_name=driver_name) - - for field_name, field_values in filter_fields.items(): - field_indice = self.__indices.setdefault(field_name, dict()) - 
field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) - if not isinstance(field_values, Iterable) or isinstance(field_values, str): - field_values = [field_values] - for field_value in field_values: - if isinstance(field_value, Enum): field_value = field_value.value - if field_enum_values is not None and field_value not in field_enum_values: - raise UnsupportedFilterFieldValueException( - field_name, field_value, field_enum_values, driver_class_name=driver_name) - field_indice_drivers = field_indice.setdefault(field_value, set()) - field_indice_drivers.add(driver_class) - - def get_driver_class(self, **filter_fields) -> _Driver: - supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys()) - unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields) - if len(unsupported_filter_fields) > 0: raise UnsupportedFilterFieldException(unsupported_filter_fields) - - candidate_driver_classes : Dict[_Driver, int] = None # number of filter hits per driver - for field_name, field_values in filter_fields.items(): - field_indice = self.__indices.get(field_name) - if field_indice is None: continue - field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) - if not isinstance(field_values, Iterable) or isinstance(field_values, str): - field_values = [field_values] - - field_candidate_driver_classes = set() - for field_value in field_values: - if field_enum_values is not None and field_value not in field_enum_values: - raise UnsupportedFilterFieldValueException(field_name, field_value, field_enum_values) - field_indice_drivers = field_indice.get(field_value) - if field_indice_drivers is None: continue - field_candidate_driver_classes = field_candidate_driver_classes.union(field_indice_drivers) - - if candidate_driver_classes is None: - if len(field_candidate_driver_classes) == 0: continue - candidate_driver_classes = {k:1 for k in field_candidate_driver_classes} - else: - for candidate_driver_class in candidate_driver_classes: - if candidate_driver_class not in field_candidate_driver_classes: continue - candidate_driver_classes[candidate_driver_class] += 1 - - if len(candidate_driver_classes) == 0: raise UnsatisfiedFilterException(filter_fields) - candidate_driver_classes = sorted(candidate_driver_classes.items(), key=operator.itemgetter(1), reverse=True) - - MSG = '[get_driver_class] candidate_driver_classes={:s}' - LOGGER.debug(MSG.format(str(candidate_driver_classes))) - - return candidate_driver_classes[0][0] + filter_fields = sanitize_filter_fields( + filter_fields, driver_name=driver_name + ) + self.__drivers.append((driver_class, filter_fields)) + + + def is_driver_compatible( + self, driver_filter_fields : Dict[FilterFieldEnum, Any], + selection_filter_fields : Dict[FilterFieldEnum, Any] + ) -> bool: + # by construction empty driver_filter_fields are not allowed + # by construction empty selection_filter_fields are not allowed + for filter_field in SUPPORTED_FILTER_FIELDS: + driver_values = set(driver_filter_fields.get(filter_field, set())) + if driver_values is None : continue # means driver does not restrict + if len(driver_values) == 0: continue # means driver does not restrict + + selection_values = set(selection_filter_fields.get(filter_field, set())) + is_field_compatible = selection_values.issubset(driver_values) + if not is_field_compatible: return False + + return True + + + def get_driver_class(self, **selection_filter_fields) -> _Driver: + sanitized_filter_fields = sanitize_filter_fields(selection_filter_fields) + + compatible_drivers 
: List[Tuple[Type[_Driver], Dict[FilterFieldEnum, Any]]] = [ + driver_class + for driver_class,driver_filter_fields in self.__drivers + if self.is_driver_compatible(driver_filter_fields, sanitized_filter_fields) + ] + + MSG = '[get_driver_class] compatible_drivers={:s}' + LOGGER.debug(MSG.format(str(compatible_drivers))) + + num_compatible = len(compatible_drivers) + if num_compatible == 0: + raise UnsatisfiedFilterException(selection_filter_fields) + if num_compatible > 1: + raise AmbiguousFilterException(selection_filter_fields, compatible_drivers) + return compatible_drivers[0] diff --git a/src/device/service/driver_api/Exceptions.py b/src/device/service/driver_api/Exceptions.py index 1871fc2e0..8f33ebc57 100644 --- a/src/device/service/driver_api/Exceptions.py +++ b/src/device/service/driver_api/Exceptions.py @@ -17,11 +17,26 @@ class UnsatisfiedFilterException(Exception): msg = 'No Driver satisfies FilterFields({:s})' super().__init__(msg.format(str(filter_fields))) +class AmbiguousFilterException(Exception): + def __init__(self, filter_fields, compatible_drivers): + msg = 'Multiple Drivers satisfy FilterFields({:s}): {:s}' + super().__init__(msg.format(str(filter_fields), str(compatible_drivers))) + class UnsupportedDriverClassException(Exception): def __init__(self, driver_class_name): msg = 'Class({:s}) is not a subclass of _Driver' super().__init__(msg.format(str(driver_class_name))) +class EmptyFilterFieldException(Exception): + def __init__(self, filter_fields, driver_class_name=None): + if driver_class_name: + msg = 'Empty FilterField({:s}) specified by Driver({:s}) is not supported' + msg = msg.format(str(filter_fields), str(driver_class_name)) + else: + msg = 'Empty FilterField({:s}) is not supported' + msg = msg.format(str(filter_fields)) + super().__init__(msg) + class UnsupportedFilterFieldException(Exception): def __init__(self, unsupported_filter_fields, driver_class_name=None): if driver_class_name: diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 40bb9176b..99e18f0d6 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os + from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import DeviceDriverEnum from device.Config import LOAD_ALL_DEVICE_DRIVERS @@ -23,101 +23,66 @@ DRIVERS = [] from .emulated.EmulatedDriver import EmulatedDriver # pylint: disable=wrong-import-position DRIVERS.append( (EmulatedDriver, [ - # TODO: multi-filter is not working { - FilterFieldEnum.DEVICE_TYPE: [ - DeviceTypeEnum.EMULATED_DATACENTER, - DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM, - DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, - DeviceTypeEnum.EMULATED_OPTICAL_ROADM, - DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER, - DeviceTypeEnum.EMULATED_P4_SWITCH, - DeviceTypeEnum.EMULATED_PACKET_ROUTER, - DeviceTypeEnum.EMULATED_PACKET_SWITCH, - - #DeviceTypeEnum.DATACENTER, - #DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, - #DeviceTypeEnum.OPEN_LINE_SYSTEM, - #DeviceTypeEnum.OPTICAL_ROADM, - #DeviceTypeEnum.OPTICAL_TRANSPONDER, - #DeviceTypeEnum.P4_SWITCH, - DeviceTypeEnum.PACKET_ROUTER, - DeviceTypeEnum.PACKET_POP, - #DeviceTypeEnum.PACKET_SWITCH, - ], + FilterFieldEnum.DEVICE_TYPE: [], # any device type FilterFieldEnum.DRIVER: [ DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, ], - }, - #{ - # # Emulated devices, all drivers => use Emulated - # FilterFieldEnum.DEVICE_TYPE: [ - # DeviceTypeEnum.EMULATED_DATACENTER, - # DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM, - # DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, - # DeviceTypeEnum.EMULATED_OPTICAL_ROADM, - # DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER, - # DeviceTypeEnum.EMULATED_P4_SWITCH, - # DeviceTypeEnum.EMULATED_PACKET_ROUTER, - # DeviceTypeEnum.EMULATED_PACKET_SWITCH, - # ], - # FilterFieldEnum.DRIVER: [ - # DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, - # DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, - # DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, - # DeviceDriverEnum.DEVICEDRIVER_P4, - # DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, - # DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532, - # DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, - # ], - #} - ])) - -from .ietf_l2vpn.IetfL2VpnDriver import IetfL2VpnDriver # pylint: disable=wrong-import-position -DRIVERS.append( - (IetfL2VpnDriver, [ - { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, - FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN, } ])) +if LOAD_ALL_DEVICE_DRIVERS: + from .ietf_l2vpn.IetfL2VpnDriver import IetfL2VpnDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (IetfL2VpnDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, + FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN, + } + ])) -from .ietf_l3vpn.IetfL3VpnDriver import IetfL3VpnDriver # pylint: disable=wrong-import-position -DRIVERS.append( - (IetfL3VpnDriver, [ - { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, - FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN, - } - ])) +if LOAD_ALL_DEVICE_DRIVERS: + from .ietf_l3vpn.IetfL3VpnDriver import IetfL3VpnDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (IetfL3VpnDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, + FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN, + } + ])) -from .ietf_actn.IetfActnDriver import IetfActnDriver # pylint: disable=wrong-import-position -DRIVERS.append( - (IetfActnDriver, [ - { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM, - FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN, - } - ])) +if LOAD_ALL_DEVICE_DRIVERS: + from 
.ietf_actn.IetfActnDriver import IetfActnDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (IetfActnDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: [ + DeviceTypeEnum.OPEN_LINE_SYSTEM, + DeviceTypeEnum.NCE, + ], + FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN, + } + ])) -from .ietf_slice.IetfSliceDriver import IetfSliceDriver # pylint: disable=wrong-import-position -DRIVERS.append( - (IetfSliceDriver, [ - { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, - FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE, - } - ])) +if LOAD_ALL_DEVICE_DRIVERS: + from .ietf_slice.IetfSliceDriver import IetfSliceDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (IetfSliceDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, + FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE, + } + ])) -from .nce.NCEDriver import NCEDriver # pylint: disable=wrong-import-position -DRIVERS.append( - (NCEDriver, [ - { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.NCE, - FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_NCE, - } - ])) +if LOAD_ALL_DEVICE_DRIVERS: + from .nce.NCEDriver import NCEDriver # pylint: disable=wrong-import-position + DRIVERS.append( + (NCEDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.NCE, + FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_NCE, + } + ])) if LOAD_ALL_DEVICE_DRIVERS: from .openconfig.OpenConfigDriver import OpenConfigDriver # pylint: disable=wrong-import-position @@ -200,6 +165,7 @@ if LOAD_ALL_DEVICE_DRIVERS: FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, } ])) + if LOAD_ALL_DEVICE_DRIVERS: from .ryu.RyuDriver import RyuDriver DRIVERS.append( -- GitLab From 97b1d9c6fb467cb204788c85d45e8d8afc44b505 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 12:52:15 +0000 Subject: [PATCH 241/367] ECOC F5GA Telemetry Demo: - Fixed slice2 provisioning descriptor --- src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index 2a4fac447..d1030e8ec 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -29,7 +29,7 @@ }, { "id": "2", - "node-id": "POP2", + "node-id": "POP1", "sdp-ip-address": ["172.16.204.220"], "service-match-criteria": {"match-criterion": [{ "index": 1, -- GitLab From 6bc24749e43acf78b42f06fe930f51eadd6f3eb8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 13:01:00 +0000 Subject: [PATCH 242/367] Service component - L3NM - NCE-FAN Service Handler: - Fixed composition of qos-profile name --- .../service/service_handlers/l3nm_ncefan/ConfigRules.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py index 312bbbf50..ad44a3e8e 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py @@ -41,9 +41,10 @@ def setup_config_rules(service_name: str, json_settings: Dict) -> List[Dict]: app_id: str = f"app_{app_flow_id}" app_feature_id: str = f"feature_{app_flow_id}" app_flow_name: str = f"App_Flow_{app_flow_id}" + qos_profile_name: str = 
f"AR_VR_Gaming_{app_flow_id}" + app_flow_max_online_users: int = json_settings.get("app_flow_max_online_users", 1) app_flow_stas: str = json_settings.get("stas", "00:3D:E1:18:82:9E") - qos_profile_name: str = json_settings.get("app_flow_qos_profile", "AR_VR_Gaming") app_flow_duration: int = json_settings.get("app_flow_duration", 9999) protocol: str = json_settings.get("protocol", "tcp") -- GitLab From 1d971bba827094ac8731c8d392e6c5d16890be35 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 15:29:41 +0000 Subject: [PATCH 243/367] Service component - L3NM - IETF ACTN Service Handler: - Fixed retrieval of endpoint configuration rules - Fixed order for removal configuration rules --- .../l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py index 1f4eaccb0..9db6fb2db 100644 --- a/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py @@ -43,6 +43,8 @@ class L3NM_IETFACTN_ServiceHandler(_ServiceHandler): ) -> Tuple[Device, EndPoint, Dict]: device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + for config_rule in device_obj.device_config.config_rules: + self.__settings_handler.update_config_rule(config_rule) endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) device_name = device_obj.name @@ -256,8 +258,8 @@ class L3NM_IETFACTN_ServiceHandler(_ServiceHandler): ) del controller.device_config.config_rules[:] - controller.device_config.config_rules.append(osu_tunnel_config_rule) controller.device_config.config_rules.append(etht_service_config_rule) + controller.device_config.config_rules.append(osu_tunnel_config_rule) self.__task_executor.configure_device(controller) results.append(True) except Exception as e: # pylint: disable=broad-except -- GitLab From 458705722cd9e52e5a2fbc18c3b7829efef931da Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 16:38:50 +0000 Subject: [PATCH 244/367] Service component - Service Handler API: - Enhanced Settings Handler get_endpoint_settings() --- .../service/service_handler_api/SettingsHandler.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/service/service/service_handler_api/SettingsHandler.py b/src/service/service/service_handler_api/SettingsHandler.py index b9b8b2950..0313e09f6 100644 --- a/src/service/service/service_handler_api/SettingsHandler.py +++ b/src/service/service/service_handler_api/SettingsHandler.py @@ -78,12 +78,16 @@ class SettingsHandler: for device_key in device_keys: for endpoint_key in endpoint_keys: - endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_key, endpoint_key) + endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]'.format(device_key, endpoint_key) + endpoint_settings = self.get(endpoint_settings_uri) + if endpoint_settings is not None: return endpoint_settings + + endpoint_settings_uri += '/settings' endpoint_settings = self.get(endpoint_settings_uri) if endpoint_settings is not None: return endpoint_settings return None - + def get_endpoint_acls(self, device : Device, endpoint : EndPoint) -> List [Tuple]: 
endpoint_name = endpoint.name device_keys = device.device_id.device_uuid.uuid, device.name -- GitLab From 1ec3e35f9bfaf1786fd3ebb3a182962a34aa7c45 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 16:39:15 +0000 Subject: [PATCH 245/367] Service component - L3NM - IETF ACTN Service Handler: - Added pre-loading of device config rules in SettingsHandler --- .../l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py index 9db6fb2db..acaf31b87 100644 --- a/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py @@ -43,11 +43,19 @@ class L3NM_IETFACTN_ServiceHandler(_ServiceHandler): ) -> Tuple[Device, EndPoint, Dict]: device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device_obj.name + device_prefix = '/device[{:s}]/'.format(device_name) for config_rule in device_obj.device_config.config_rules: - self.__settings_handler.update_config_rule(config_rule) + if config_rule.WhichOneof('config_rule') != 'custom': continue + rw_config_rule = ConfigRule() + rw_config_rule.CopyFrom(config_rule) + resource_key = str(config_rule.custom.resource_key) + if resource_key.startswith('/endpoints/endpoint['): + resource_key = resource_key.replace('/endpoints/', device_prefix) + rw_config_rule.custom.resource_key = resource_key + self.__settings_handler.update_config_rule(rw_config_rule) endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) - device_name = device_obj.name endpoint_name = endpoint_obj.name if endpoint_settings is None: MSG = 'Settings not found for Endpoint(device=[uuid={:s}, name={:s}], endpoint=[uuid={:s}, name={:s}])' -- GitLab From 8c35c9c3c583cd9f15ec649185e2e99d1ad70649 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 16:40:14 +0000 Subject: [PATCH 246/367] PathComp component - Frontend: - Added injection of config rules for device endpoints used in the hops --- .../frontend/service/algorithms/_Algorithm.py | 48 +++++++++++++++++-- 1 file changed, 45 insertions(+), 3 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index fd389d321..da5aa3764 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import json, logging, requests, uuid +import json, logging, re, requests, uuid from typing import Dict, List, Optional, Tuple, Union from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( @@ -183,11 +183,9 @@ class _Algorithm: if service_type == ServiceTypeEnum.SERVICETYPE_L2NM and rules_nb == 0: compose_l2nm_config_rules(config_rules, service.service_config.config_rules) self.logger.info("Installing default rules for L2NM service") - pass elif service_type == ServiceTypeEnum.SERVICETYPE_L3NM and rules_nb == 0: compose_l3nm_config_rules(config_rules, service.service_config.config_rules) self.logger.info("Installing default rules for L3NM service") - pass elif service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE and rules_nb == 0: compose_tapi_config_rules(config_rules, service.service_config.config_rules) self.logger.info("Installing default rules for TAPI service") @@ -199,6 +197,50 @@ class _Algorithm: config_rules, service.service_config.config_rules, path_hops, self.device_name_mapping, self.endpoint_name_mapping) + for path_hop in path_hops: + path_hop_device_id = path_hop['device'] + path_hop_ingress_ep_id = path_hop['ingress_ep'] + path_hop_egress_ep_id = path_hop['egress_ep'] + + path_hop_device_uuid = self.device_name_mapping [path_hop_device_id] + path_hop_ingress_ep_uuid = self.endpoint_name_mapping[(path_hop_device_id, path_hop_ingress_ep_id)] + path_hop_egress_ep_uuid = self.endpoint_name_mapping[(path_hop_device_id, path_hop_egress_ep_id )] + + target_endpoint_ids = set() + target_endpoint_ids.add(path_hop_ingress_ep_uuid) + target_endpoint_ids.add(path_hop_egress_ep_uuid) + + path_hop_device = self.device_dict [path_hop_device_uuid][1] + path_hop_ingress_ep = self.endpoint_dict[path_hop_device_uuid][path_hop_ingress_ep_uuid][1] + path_hop_egress_ep = self.endpoint_dict[path_hop_device_uuid][path_hop_egress_ep_uuid ][1] + + path_hop_device_name = path_hop_device.name + path_hop_ingress_ep_name = path_hop_ingress_ep.name + path_hop_egress_ep_name = path_hop_egress_ep.name + + target_endpoint_ids.add(path_hop_ingress_ep_name) + target_endpoint_ids.add(path_hop_egress_ep_name) + + RE_ENDPOINT_SETTINGS = re.compile(r'\/endpoints\/endpoint\[([^\]]+)\](\/settings)?') + for config_rule in path_hop_device.device_config.config_rules: + if config_rule.WhichOneof('config_rule') != 'custom': continue + rw_config_rule = ConfigRule() + rw_config_rule.CopyFrom(config_rule) + resource_key = str(config_rule.custom.resource_key) + ep_match = RE_ENDPOINT_SETTINGS.match(resource_key) + if ep_match is None: continue + endpoint_id = ep_match.group(1) + if endpoint_id not in target_endpoint_ids: continue + + endpoint_uuid = self.endpoint_name_mapping[(path_hop_device_id, endpoint_id)] + endpoint = self.endpoint_dict[path_hop_device_uuid][endpoint_uuid][1] + endpoint_name = endpoint.name + resource_key = '/device[{:s}]/endpoint[{:s}]/settings'.format( + path_hop_device_name, endpoint_name + ) + rw_config_rule.custom.resource_key = resource_key + service.service_config.config_rules.append(rw_config_rule) + if path_hops is not None and len(path_hops) > 0: ingress_endpoint_id = service.service_endpoint_ids.add() ingress_endpoint_id.device_id.device_uuid.uuid = path_hops[0]['device'] -- GitLab From 4b266d384265503727eb4de3b0b485da97bf6b18 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 16:55:02 +0000 Subject: [PATCH 247/367] PathComp component - Frontend: - Fixed injection of config rules for device endpoints used in the hops --- 
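Note on the mapping scheme applied in this patch (a minimal sketch with illustrative values; 'uuid-1' and 'R1' are assumed identifiers, not taken from the codebase):

    # Both the canonical UUID and the canonical name are resolvable from either key:
    device_uuid_mapping = {'R1': 'uuid-1', 'uuid-1': 'uuid-1'}   # any identifier -> device UUID
    device_name_mapping = {'R1': 'R1',     'uuid-1': 'R1'}       # any identifier -> device name

    # A path-hop device identifier, whether it carries the name or the UUID, can thus be
    # normalised in both directions without an extra lookup in the device dictionary:
    hop_device = 'R1'
    assert device_uuid_mapping[hop_device] == 'uuid-1'
    assert device_name_mapping[hop_device] == 'R1'

The same scheme is mirrored for endpoints, keyed by (device identifier, endpoint identifier) tuples.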
.../frontend/service/algorithms/_Algorithm.py | 48 ++++++++++--------- 1 file changed, 25 insertions(+), 23 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index da5aa3764..8e1af04f6 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -48,8 +48,10 @@ class _Algorithm: self.device_list : List[Dict] = list() self.device_dict : Dict[str, Tuple[Dict, Device]] = dict() + self.device_uuid_mapping : Dict[str, str] = dict() self.device_name_mapping : Dict[str, str] = dict() self.endpoint_dict : Dict[str, Dict[str, Tuple[Dict, EndPointId]]] = dict() + self.endpoint_uuid_mapping : Dict[Tuple[str, str], str] = dict() self.endpoint_name_mapping : Dict[Tuple[str, str], str] = dict() self.link_list : List[Dict] = list() self.link_dict : Dict[str, Tuple[Dict, Link]] = dict() @@ -68,8 +70,10 @@ class _Algorithm: _device_uuid = grpc_device.device_id.device_uuid.uuid _device_name = grpc_device.name - self.device_name_mapping[_device_name] = _device_uuid - self.device_name_mapping[_device_uuid] = _device_uuid + self.device_uuid_mapping[_device_name] = _device_uuid + self.device_uuid_mapping[_device_uuid] = _device_uuid + self.device_name_mapping[_device_name] = _device_name + self.device_name_mapping[_device_uuid] = _device_name device_endpoint_dict : Dict[str, Tuple[Dict, EndPointId]] = dict() for json_endpoint,grpc_endpoint in zip(json_device['device_endpoints'], grpc_device.device_endpoints): @@ -79,10 +83,14 @@ class _Algorithm: _endpoint_uuid = grpc_endpoint.endpoint_id.endpoint_uuid.uuid _endpoint_name = grpc_endpoint.name - self.endpoint_name_mapping[(_device_uuid, _endpoint_name)] = _endpoint_uuid - self.endpoint_name_mapping[(_device_name, _endpoint_name)] = _endpoint_uuid - self.endpoint_name_mapping[(_device_uuid, _endpoint_uuid)] = _endpoint_uuid - self.endpoint_name_mapping[(_device_name, _endpoint_uuid)] = _endpoint_uuid + self.endpoint_uuid_mapping[(_device_uuid, _endpoint_name)] = _endpoint_uuid + self.endpoint_uuid_mapping[(_device_name, _endpoint_name)] = _endpoint_uuid + self.endpoint_uuid_mapping[(_device_uuid, _endpoint_uuid)] = _endpoint_uuid + self.endpoint_uuid_mapping[(_device_name, _endpoint_uuid)] = _endpoint_uuid + self.endpoint_name_mapping[(_device_uuid, _endpoint_name)] = _endpoint_name + self.endpoint_name_mapping[(_device_name, _endpoint_name)] = _endpoint_name + self.endpoint_name_mapping[(_device_uuid, _endpoint_uuid)] = _endpoint_name + self.endpoint_name_mapping[(_device_name, _endpoint_uuid)] = _endpoint_name self.endpoint_dict[device_uuid] = device_endpoint_dict @@ -195,32 +203,28 @@ class _Algorithm: compose_device_config_rules( config_rules, service.service_config.config_rules, path_hops, - self.device_name_mapping, self.endpoint_name_mapping) + self.device_uuid_mapping, self.endpoint_uuid_mapping) for path_hop in path_hops: path_hop_device_id = path_hop['device'] path_hop_ingress_ep_id = path_hop['ingress_ep'] path_hop_egress_ep_id = path_hop['egress_ep'] - path_hop_device_uuid = self.device_name_mapping [path_hop_device_id] - path_hop_ingress_ep_uuid = self.endpoint_name_mapping[(path_hop_device_id, path_hop_ingress_ep_id)] - path_hop_egress_ep_uuid = self.endpoint_name_mapping[(path_hop_device_id, path_hop_egress_ep_id )] + path_hop_device_uuid = self.device_uuid_mapping [path_hop_device_id] + path_hop_ingress_ep_uuid = self.endpoint_uuid_mapping[(path_hop_device_id, path_hop_ingress_ep_id)] + 
path_hop_egress_ep_uuid = self.endpoint_uuid_mapping[(path_hop_device_id, path_hop_egress_ep_id )] + path_hop_ingress_ep_name = self.endpoint_name_mapping[(path_hop_device_id, path_hop_ingress_ep_id)] + path_hop_egress_ep_name = self.endpoint_name_mapping[(path_hop_device_id, path_hop_egress_ep_id )] target_endpoint_ids = set() target_endpoint_ids.add(path_hop_ingress_ep_uuid) target_endpoint_ids.add(path_hop_egress_ep_uuid) - - path_hop_device = self.device_dict [path_hop_device_uuid][1] - path_hop_ingress_ep = self.endpoint_dict[path_hop_device_uuid][path_hop_ingress_ep_uuid][1] - path_hop_egress_ep = self.endpoint_dict[path_hop_device_uuid][path_hop_egress_ep_uuid ][1] - - path_hop_device_name = path_hop_device.name - path_hop_ingress_ep_name = path_hop_ingress_ep.name - path_hop_egress_ep_name = path_hop_egress_ep.name - target_endpoint_ids.add(path_hop_ingress_ep_name) target_endpoint_ids.add(path_hop_egress_ep_name) + path_hop_device = self.device_dict[path_hop_device_uuid][1] + path_hop_device_name = path_hop_device.name + RE_ENDPOINT_SETTINGS = re.compile(r'\/endpoints\/endpoint\[([^\]]+)\](\/settings)?') for config_rule in path_hop_device.device_config.config_rules: if config_rule.WhichOneof('config_rule') != 'custom': continue @@ -232,9 +236,7 @@ class _Algorithm: endpoint_id = ep_match.group(1) if endpoint_id not in target_endpoint_ids: continue - endpoint_uuid = self.endpoint_name_mapping[(path_hop_device_id, endpoint_id)] - endpoint = self.endpoint_dict[path_hop_device_uuid][endpoint_uuid][1] - endpoint_name = endpoint.name + endpoint_name = self.endpoint_name_mapping[(path_hop_device_id, endpoint_id)] resource_key = '/device[{:s}]/endpoint[{:s}]/settings'.format( path_hop_device_name, endpoint_name ) @@ -293,7 +295,7 @@ class _Algorithm: path_hops = eropath_to_hops(service_path_ero['devices'], self.endpoint_to_link_dict) json_generated_config_rules = generate_neighbor_endpoint_config_rules( - json_orig_config_rules, path_hops, self.device_name_mapping, self.endpoint_name_mapping + json_orig_config_rules, path_hops, self.device_uuid_mapping, self.endpoint_uuid_mapping ) json_extended_config_rules = list() json_extended_config_rules.extend(json_orig_config_rules) -- GitLab From 45d196ba30bec74b435db548df4d64692500e031 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 17:12:08 +0000 Subject: [PATCH 248/367] PathComp component - Frontend: - Fixed injection of config rules for device endpoints used in the hops --- .../frontend/service/algorithms/_Algorithm.py | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index 8e1af04f6..610bffaf2 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -19,7 +19,9 @@ from common.proto.context_pb2 import ( ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum ) from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest +from common.tools.context_queries.Device import get_device from common.tools.grpc.Tools import grpc_message_list_to_json +from context.client.ContextClient import ContextClient from pathcomp.frontend.Config import BACKEND_URL from .tools.EroPathToHops import eropath_to_hops from .tools.ComposeConfigRules import ( @@ -205,24 +207,30 @@ class _Algorithm: config_rules, service.service_config.config_rules, path_hops, 
self.device_uuid_mapping, self.endpoint_uuid_mapping) + context_client = ContextClient() + device_cache : Dict[str, Device] = dict() for path_hop in path_hops: path_hop_device_id = path_hop['device'] path_hop_ingress_ep_id = path_hop['ingress_ep'] path_hop_egress_ep_id = path_hop['egress_ep'] - path_hop_device_uuid = self.device_uuid_mapping [path_hop_device_id] - path_hop_ingress_ep_uuid = self.endpoint_uuid_mapping[(path_hop_device_id, path_hop_ingress_ep_id)] - path_hop_egress_ep_uuid = self.endpoint_uuid_mapping[(path_hop_device_id, path_hop_egress_ep_id )] - path_hop_ingress_ep_name = self.endpoint_name_mapping[(path_hop_device_id, path_hop_ingress_ep_id)] - path_hop_egress_ep_name = self.endpoint_name_mapping[(path_hop_device_id, path_hop_egress_ep_id )] + path_hop_device_uuid = self.device_uuid_mapping[path_hop_device_id] + if path_hop_device_uuid in device_cache: + path_hop_device = device_cache[path_hop_device_uuid] + else: + path_hop_device = get_device( + context_client, path_hop_device_uuid, include_components=False, + include_endpoints=False, include_config_rules=True + ) + device_cache[path_hop_device_uuid] = path_hop_device - target_endpoint_ids = set() - target_endpoint_ids.add(path_hop_ingress_ep_uuid) - target_endpoint_ids.add(path_hop_egress_ep_uuid) - target_endpoint_ids.add(path_hop_ingress_ep_name) - target_endpoint_ids.add(path_hop_egress_ep_name) + target_endpoint_ids = { + self.endpoint_uuid_mapping[(path_hop_device_id, path_hop_ingress_ep_id)], + self.endpoint_uuid_mapping[(path_hop_device_id, path_hop_egress_ep_id )], + self.endpoint_name_mapping[(path_hop_device_id, path_hop_ingress_ep_id)], + self.endpoint_name_mapping[(path_hop_device_id, path_hop_egress_ep_id )], + } - path_hop_device = self.device_dict[path_hop_device_uuid][1] path_hop_device_name = path_hop_device.name RE_ENDPOINT_SETTINGS = re.compile(r'\/endpoints\/endpoint\[([^\]]+)\](\/settings)?') -- GitLab From 0bf91659421f42c9601e7bc83a26d4889444f7d3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 18 Sep 2025 17:29:00 +0000 Subject: [PATCH 249/367] Service component - Service Handler API: - Fixed Settings Handler get_endpoint_settings() --- src/service/service/service_handler_api/SettingsHandler.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/service/service/service_handler_api/SettingsHandler.py b/src/service/service/service_handler_api/SettingsHandler.py index 0313e09f6..5c9bc79a7 100644 --- a/src/service/service/service_handler_api/SettingsHandler.py +++ b/src/service/service/service_handler_api/SettingsHandler.py @@ -78,11 +78,13 @@ class SettingsHandler: for device_key in device_keys: for endpoint_key in endpoint_keys: - endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]'.format(device_key, endpoint_key) + # should navigate from deepest to top-level + + endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_key, endpoint_key) endpoint_settings = self.get(endpoint_settings_uri) if endpoint_settings is not None: return endpoint_settings - endpoint_settings_uri += '/settings' + endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]'.format(device_key, endpoint_key) endpoint_settings = self.get(endpoint_settings_uri) if endpoint_settings is not None: return endpoint_settings -- GitLab From cb25c7173b79f499ba4a7ce208f5b8f6168d14dc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 06:51:12 +0000 Subject: [PATCH 250/367] NBI component - IETF Network Slice: - Fixed discovery of SDP IP Address - Fixed discovery of SDP VLAN tag 
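The SDP inspection introduced below follows the pattern sketched here (illustrative helper only; the name get_sdp_vlan_tag is not part of the handler):

    from typing import Dict, Optional

    def get_sdp_vlan_tag(sdp : Dict) -> Optional[int]:
        # Each SDP is expected to carry exactly one match-criterion.
        match_criteria = sdp['service-match-criteria']['match-criterion']
        if len(match_criteria) != 1:
            raise Exception('Each SDP must have exactly 1 service-match-criteria/match-criterion')
        # Scan the match-type entries; only the VLAN entry is relevant for the endpoint settings.
        for match_type_item in match_criteria[0]['match-type']:
            if match_type_item['type'] != 'ietf-network-slice-service:vlan': continue
            return int(match_type_item['value'][0])
        return None

The returned tag, when present, is stored in the endpoint config-rule fields together with the single sdp-ip-address of the SDP.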
--- .../ietf_network_slice/ietf_slice_handler.py | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py index b4b32cd3d..930790d20 100644 --- a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py +++ b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py @@ -347,18 +347,36 @@ class IETFSliceHandler: endpoint.endpoint_uuid.uuid = endpoint_uuid list_endpoints.append(endpoint) + match_criteria = sdp["service-match-criteria"]["match-criterion"] + if len(match_criteria) != 1: + raise Exception('Each SDP must have exactly 1 service-match-criteria/match-criterion') + match_criterion = match_criteria[0] + # Keep track of connection-group-id from each SDP connection_group_ids.add( - sdp["service-match-criteria"]["match-criterion"][0][ - "target-connection-group-id" - ] + match_criterion['target-connection-group-id'] ) + sdp_ip_addresses = sdp['sdp-ip-address'] + if len(sdp_ip_addresses) != 1: + raise Exception('Each SDP must have exactly 1 sdp-ip-address') + sdp_ip_address = sdp_ip_addresses[0] + + vlan_tag = None + match_type = match_criterion['match-type'] + for match_type_item in match_type: + item_type = match_type_item['type'] + if item_type != 'ietf-network-slice-service:vlan': continue + vlan_tag = int(match_type_item['value'][0]) + break + # Endpoint-specific config rule fields endpoint_config_rule_fields = { - "address_ip": (endpoint_uuid, RAISE_IF_DIFFERS), - "address_prefix": (ADDRESS_PREFIX, RAISE_IF_DIFFERS), + 'address_ip': (sdp_ip_address, RAISE_IF_DIFFERS), + 'address_prefix': (ADDRESS_PREFIX, RAISE_IF_DIFFERS), } + if vlan_tag is not None: + endpoint_config_rule_fields['vlan_tag'] = vlan_tag endpoint_config_rules.append( ( f"/device[{device_uuid}]/endpoint[{endpoint_uuid}]/settings", -- GitLab From c1ff4a5790ab8c6decc59daaed54dc2e3e07f65f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 08:38:18 +0000 Subject: [PATCH 251/367] SIMAP Connector: - Implemented TelemetryPool skeleton --- src/simap_connector/service/__main__.py | 5 +- .../service/simap_updater/SimapUpdater.py | 12 ++- .../service/telemetry/TelemetryPool.py | 74 +++++++++++++++++++ .../service/telemetry/TelemetryWorker.py | 59 +++++++++++++++ .../service/telemetry/__init__.py | 13 ++++ 5 files changed, 160 insertions(+), 3 deletions(-) create mode 100644 src/simap_connector/service/telemetry/TelemetryPool.py create mode 100644 src/simap_connector/service/telemetry/TelemetryWorker.py create mode 100644 src/simap_connector/service/telemetry/__init__.py diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 6f8bdbc87..9ef0700f2 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -20,6 +20,7 @@ from common.Settings import ( get_log_level, get_metrics_port, wait_for_environment_variables ) from .simap_updater.SimapUpdater import SimapUpdater +from .telemetry.TelemetryPool import TelemetryPool from .SimapConnectorService import SimapConnectorService TERMINATE = threading.Event() @@ -55,7 +56,8 @@ def main(): grpc_service = SimapConnectorService() grpc_service.start() - simap_updater = SimapUpdater(TERMINATE) + telemetry_pool = TelemetryPool(terminate=TERMINATE) + simap_updater = SimapUpdater(TERMINATE, telemetry_pool) simap_updater.start() LOGGER.info('Running...') @@ -64,6 +66,7 @@ def main(): LOGGER.info('Terminating...') simap_updater.stop() + 
telemetry_pool.stop_all() grpc_service.stop() LOGGER.info('Bye') diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index f580addf2..7253c0139 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -30,6 +30,7 @@ from simap_connector.Config import ( SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap +from simap_connector.service.telemetry.TelemetryPool import TelemetryPool from .SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache from .Tools import get_device_endpoint, get_link_endpoint, get_service_endpoint @@ -51,10 +52,12 @@ class EventDispatcher(BaseEventDispatcher): def __init__( self, events_queue : queue.PriorityQueue, context_client : ContextClient, + telemetry_pool : TelemetryPool, terminate : Optional[threading.Event] = None ) -> None: super().__init__(events_queue, terminate) self._context_client = context_client + self._telemetry_pool = telemetry_pool self._object_cache = ObjectCache(self._context_client) self._restconf_client = RestConfClient( scheme=SIMAP_SERVER_SCHEME, address=SIMAP_SERVER_ADDRESS, @@ -507,6 +510,8 @@ class EventDispatcher(BaseEventDispatcher): #) #dom_link = domain_topo.link(link_name) #dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) + + self._telemetry_pool.start_worker(domain_name) return True @@ -602,12 +607,15 @@ class EventDispatcher(BaseEventDispatcher): #self._object_cache.delete(CachedEntities.SERVICE, service_uuid) #self._object_cache.delete(CachedEntities.SERVICE, service_name) + self._telemetry_pool.stop_worker(domain_name) + MSG = 'Logical Link Removed for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) class SimapUpdater: - def __init__(self, terminate : threading.Event) -> None: + def __init__(self, terminate : threading.Event, telemetry_pool : TelemetryPool) -> None: + self._telemetry_pool = telemetry_pool self._context_client = ContextClient() self._event_collector = BaseEventCollector(terminate=terminate) @@ -617,7 +625,7 @@ class SimapUpdater: self._event_dispatcher = EventDispatcher( self._event_collector.get_events_queue(), self._context_client, - terminate=terminate + self._telemetry_pool, terminate=terminate ) def start(self) -> None: diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py new file mode 100644 index 000000000..808b2b468 --- /dev/null +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -0,0 +1,74 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging, threading +from typing import Dict, Optional +from .TelemetryWorker import TelemetryWorker + + +LOGGER = logging.getLogger(__name__) + + +class TelemetryPool: + def __init__( + self, terminate : Optional[threading.Event] = None + ) -> None: + self._workers : Dict[str, TelemetryWorker] = dict() + self._lock = threading.Lock() + self._terminate = threading.Event() if terminate is None else terminate + + + def start_worker(self, domain_name : str) -> None: + with self._lock: + if domain_name in self._workers: + MSG = '[start_worker] Worker already running for Domain({:s})' + LOGGER.debug(MSG.format(str(domain_name))) + return + + worker = TelemetryWorker(domain_name, terminate=self._terminate) + self._workers[domain_name] = worker + worker.start() + + MSG = '[start_worker] Started worker for Domain({:s})' + LOGGER.info(MSG.format(str(domain_name))) + + + def stop_worker(self, domain_name : str) -> None: + with self._lock: + worker = self._workers.pop(domain_name, None) + + if worker is None: + MSG = '[stop_worker] No worker found for Domain({:s})' + LOGGER.debug(MSG.format(str(domain_name))) + return + + worker.stop() + + MSG = '[stop_worker] Stopped worker for Domain({:s})' + LOGGER.info(MSG.format(str(domain_name))) + + + def stop_all(self) -> None: + LOGGER.info('[stop_all] Stopping all worker') + + with self._lock: + names = list(self._workers.keys()) + + for name in names: + try: + self.stop_worker(name) + except Exception: + MSG = '[stop_all] Unhandled Exception stopping Worker({:s})' + LOGGER.exception(MSG.format(str(name))) diff --git a/src/simap_connector/service/telemetry/TelemetryWorker.py b/src/simap_connector/service/telemetry/TelemetryWorker.py new file mode 100644 index 000000000..9d04d7a3b --- /dev/null +++ b/src/simap_connector/service/telemetry/TelemetryWorker.py @@ -0,0 +1,59 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, threading, time +from typing import Optional + + +LOGGER = logging.getLogger(__name__) + + +class TelemetryWorker(threading.Thread): + def __init__( + self, domain_name : str, terminate : Optional[threading.Event] = None + ) -> None: + name = 'TelemetryWorker-{:s}'.format(str(domain_name)) + super().__init__(name=name, daemon=True) + self.domain_name = domain_name + self._stop_event = threading.Event() + self._terminate = threading.Event() if terminate is None else terminate + + def stop(self) -> None: + MSG = '[stop][{:s}] Stopping...' + LOGGER.info(MSG.format(str(self.domain_name))) + self._stop_event.set() + self.join() + + def run(self) -> None: + MSG = '[run][{:s}] Starting...' 
+ LOGGER.info(MSG.format(str(self.domain_name))) + + try: + while not self._stop_event.is_set() and not self._terminate.is_set(): + + MSG = '[run][{:s}] Heartbeat' + LOGGER.info(MSG.format(str(self.domain_name))) + + for _ in range(10): + if self._stop_event.is_set(): break + if self._terminate.is_set() : break + time.sleep(0.1) + + except Exception: + MSG = '[run][{:s}] Unhandled Exception' + LOGGER.info(MSG.format(str(self.domain_name))) + finally: + MSG = '[run][{:s}] Terminated' + LOGGER.info(MSG.format(str(self.domain_name))) diff --git a/src/simap_connector/service/telemetry/__init__.py b/src/simap_connector/service/telemetry/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/service/telemetry/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -- GitLab From b3f541d1ca014df54194e87a573d98e4db244228 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 08:50:21 +0000 Subject: [PATCH 252/367] NBI component - IETF Network Slice: - Fixed population of SDP VLAN tag in config rules --- src/nbi/service/ietf_network_slice/ietf_slice_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py index 930790d20..56061615b 100644 --- a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py +++ b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py @@ -376,7 +376,7 @@ class IETFSliceHandler: 'address_prefix': (ADDRESS_PREFIX, RAISE_IF_DIFFERS), } if vlan_tag is not None: - endpoint_config_rule_fields['vlan_tag'] = vlan_tag + endpoint_config_rule_fields['vlan_tag'] = (vlan_tag, RAISE_IF_DIFFERS) endpoint_config_rules.append( ( f"/device[{device_uuid}]/endpoint[{endpoint_uuid}]/settings", -- GitLab From f122bd022057c006054b953f20ad772c933ab073 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 10:32:04 +0000 Subject: [PATCH 253/367] PathComp component - Frontend: - Fixed merge of config rules injected for device endpoints based on path hops --- .../frontend/service/algorithms/_Algorithm.py | 29 +++++++++++++------ 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index 610bffaf2..5e1a5e3a6 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -16,21 +16,25 @@ import json, logging, re, requests, uuid from typing import Dict, List, Optional, Tuple, Union from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( - ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum + ConfigActionEnum, ConfigRule, Connection, Device, DeviceList, EndPointId, + Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum, ) from 
common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest from common.tools.context_queries.Device import get_device +from common.tools.grpc.ConfigRules import update_config_rule_custom from common.tools.grpc.Tools import grpc_message_list_to_json from context.client.ContextClient import ContextClient from pathcomp.frontend.Config import BACKEND_URL from .tools.EroPathToHops import eropath_to_hops from .tools.ComposeConfigRules import ( - compose_device_config_rules, compose_l2nm_config_rules, compose_l3nm_config_rules, compose_tapi_config_rules, - generate_neighbor_endpoint_config_rules + compose_device_config_rules, compose_l2nm_config_rules, compose_l3nm_config_rules, + compose_tapi_config_rules, generate_neighbor_endpoint_config_rules, ) from .tools.ComposeRequest import compose_device, compose_link, compose_service from .tools.ComputeSubServices import ( - convert_explicit_path_hops_to_connections, convert_explicit_path_hops_to_plain_connection) + convert_explicit_path_hops_to_connections, + convert_explicit_path_hops_to_plain_connection, +) SRC_END = 'src' DST_END = 'dst' @@ -236,20 +240,27 @@ class _Algorithm: RE_ENDPOINT_SETTINGS = re.compile(r'\/endpoints\/endpoint\[([^\]]+)\](\/settings)?') for config_rule in path_hop_device.device_config.config_rules: if config_rule.WhichOneof('config_rule') != 'custom': continue - rw_config_rule = ConfigRule() - rw_config_rule.CopyFrom(config_rule) resource_key = str(config_rule.custom.resource_key) ep_match = RE_ENDPOINT_SETTINGS.match(resource_key) if ep_match is None: continue endpoint_id = ep_match.group(1) if endpoint_id not in target_endpoint_ids: continue - endpoint_name = self.endpoint_name_mapping[(path_hop_device_id, endpoint_id)] + + resource_value : Dict = json.loads(config_rule.custom.resource_value) + address_ip = resource_value.pop('address_ip', '0.0.0.0') + if address_ip != '0.0.0.0': resource_value['address_ip'] = address_ip + + if len(resource_value) == 0: continue + field_updates = {name:(value, False) for name,value in resource_value.items()} + resource_key = '/device[{:s}]/endpoint[{:s}]/settings'.format( path_hop_device_name, endpoint_name ) - rw_config_rule.custom.resource_key = resource_key - service.service_config.config_rules.append(rw_config_rule) + update_config_rule_custom( + service.service_config.config_rules, resource_key, field_updates, + new_action=ConfigActionEnum.CONFIGACTION_SET + ) if path_hops is not None and len(path_hops) > 0: ingress_endpoint_id = service.service_endpoint_ids.add() -- GitLab From 98e656624348400923be5976046df731061f64c1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 10:32:52 +0000 Subject: [PATCH 254/367] Tests - Tools - SIMAP Server - Client: - Added support for SIMAP telemetry --- .../simap_server/simap_client/SimapClient.py | 110 ++++++++++++++++- .../tools/simap_server/simap_client/Tools.py | 109 +++++++++++++++++ .../simap_server/simap_client/__main__.py | 113 ++++-------------- 3 files changed, 239 insertions(+), 93 deletions(-) create mode 100644 src/tests/tools/simap_server/simap_client/Tools.py diff --git a/src/tests/tools/simap_server/simap_client/SimapClient.py b/src/tests/tools/simap_server/simap_client/SimapClient.py index 8f457d452..a300aca74 100644 --- a/src/tests/tools/simap_server/simap_client/SimapClient.py +++ b/src/tests/tools/simap_server/simap_client/SimapClient.py @@ -13,7 +13,7 @@ # limitations under the License. 
-from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.tools.rest_conf.client.RestConfClient import RestConfClient @@ -62,6 +62,51 @@ class TerminationPoint: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) + +class NodeTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + + def create( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + + class Node: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' @@ -71,6 +116,13 @@ class Node: self._network_id = network_id self._node_id = node_id self._tps : Dict[str, TerminationPoint] = dict() + self._telemetry : Optional[NodeTelemetry] = None + + @property + def telemetry(self) -> NodeTelemetry: + if self._telemetry is None: + self._telemetry = NodeTelemetry(self._restconf_client, self._network_id, self._node_id) + return self._telemetry def termination_points(self) -> List[Dict]: tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) @@ -119,6 +171,55 @@ class Node: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) + +class LinkTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': bandwidth_utilization, + 'latency' : latency, + } + if 
len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + class Link: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' @@ -127,6 +228,13 @@ class Link: self._restconf_client = restconf_client self._network_id = network_id self._link_id = link_id + self._telemetry : Optional[LinkTelemetry] = None + + @property + def telemetry(self) -> LinkTelemetry: + if self._telemetry is None: + self._telemetry = LinkTelemetry(self._restconf_client, self._network_id, self._link_id) + return self._telemetry def create( self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, diff --git a/src/tests/tools/simap_server/simap_client/Tools.py b/src/tests/tools/simap_server/simap_client/Tools.py new file mode 100644 index 000000000..b49110f82 --- /dev/null +++ b/src/tests/tools/simap_server/simap_client/Tools.py @@ -0,0 +1,109 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
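
For clarity, this is roughly the RESTCONF request that LinkTelemetry.update() shown above issues: a PATCH against the simap-telemetry subtree of one link, with the bandwidth and latency formatted as strings. Network 'te', link 'L5' and the metric values are illustrative only.

    endpoint = ('/ietf-network:networks/network=te'
                '/ietf-network-topology:link=L5'
                '/simap-telemetry:simap-telemetry')
    payload = {'ietf-network:networks': {'network': [{
        'network-id': 'te',
        'ietf-network-topology:link': [{
            'link-id': 'L5',
            'simap-telemetry:simap-telemetry': {
                'bandwidth-utilization': '42.00',    # '{:.2f}'.format(bandwidth_utilization)
                'latency'              : '1.250',    # '{:.3f}'.format(latency)
                'related-service-ids'  : ['trans-svc1'],
            },
        }],
    }]}}
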
+ +from .SimapClient import SimapClient + +def create_simap_te(simap_client : SimapClient) -> None: + te_topo = simap_client.network('te') + te_topo.create() + + te_topo.node('ONT1').create(termination_point_ids=['200', '500']) + te_topo.node('ONT2').create(termination_point_ids=['200', '500']) + te_topo.node('OLT' ).create(termination_point_ids=['200', '201', '500', '501']) + te_topo.link('L1').create('ONT1', '500', 'OLT', '200') + te_topo.link('L2').create('ONT2', '500', 'OLT', '201') + + te_topo.node('PE1').create(termination_point_ids=['200', '500', '501']) + te_topo.node('P1' ).create(termination_point_ids=['500', '501']) + te_topo.node('P2' ).create(termination_point_ids=['500', '501']) + te_topo.node('PE2').create(termination_point_ids=['200', '500', '501']) + te_topo.link('L5' ).create('PE1', '500', 'P1', '500') + te_topo.link('L6' ).create('PE1', '501', 'P2', '500') + te_topo.link('L9' ).create('P1', '501', 'PE2', '500') + te_topo.link('L10').create('P2', '501', 'PE2', '501') + + te_topo.node('OA' ).create(termination_point_ids=['200', '500', '501']) + te_topo.node('OTN1').create(termination_point_ids=['500', '501']) + te_topo.node('OTN2').create(termination_point_ids=['500', '501']) + te_topo.node('OE' ).create(termination_point_ids=['200', '500', '501']) + te_topo.link('L7' ).create('OA', '500', 'OTN1', '500') + te_topo.link('L8' ).create('OA', '501', 'OTN2', '500') + te_topo.link('L11' ).create('OTN1', '501', 'OE', '500') + te_topo.link('L12' ).create('OTN2', '501', 'OE', '501') + + te_topo.link('L3').create('OLT', '500', 'PE1', '200') + te_topo.link('L4').create('OLT', '501', 'OA', '200') + + te_topo.node('POP1').create(termination_point_ids=['200', '201', '500']) + te_topo.link('L13').create('PE2', '200', 'POP1', '500') + + te_topo.node('POP2').create(termination_point_ids=['200', '201', '500']) + te_topo.link('L14').create('OE', '200', 'POP2', '500') + + +def create_simap_trans(simap_client : SimapClient) -> None: + simap_trans = simap_client.network('simap-trans') + simap_trans.create(supporting_network_ids=['te']) + + site_1 = simap_trans.node('site1') + site_1.create(supporting_node_ids=[('te', 'PE1')]) + site_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE1', '200')]) + site_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE1', '500')]) + site_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE1', '501')]) + + site_2 = simap_trans.node('site2') + site_2.create(supporting_node_ids=[('te', 'PE2')]) + site_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE2', '200')]) + site_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE2', '500')]) + site_2.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE2', '501')]) + + simap_trans.link('Trans-L1').create('site1', '500', 'site2', '500', supporting_link_ids=[('te', 'L5'), ('te', 'L9')]) + + +def create_simap_aggnet(simap_client : SimapClient) -> None: + simap_aggnet = simap_client.network('simap-aggnet') + simap_aggnet.create(supporting_network_ids=['te', 'simap-trans']) + + sdp_1 = simap_aggnet.node('sdp1') + sdp_1.create(supporting_node_ids=[('te', 'OLT')]) + sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'OLT', '200')]) + sdp_1.termination_point('201').create(supporting_termination_point_ids=[('te', 'OLT', '201')]) + sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'OLT', '500')]) + 
sdp_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'OLT', '501')]) + + sdp_2 = simap_aggnet.node('sdp2') + sdp_2.create(supporting_node_ids=[('te', 'POP1')]) + sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) + sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) + sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + + simap_aggnet.link('AggNet-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L3'), ('simap-trans', 'Trans-L1'), ('te', 'L13')]) + + +def create_simap_e2enet(simap_client : SimapClient) -> None: + simap_e2e = simap_client.network('simap-e2e') + simap_e2e.create(supporting_network_ids=['te', 'simap-trans']) + + sdp_1 = simap_e2e.node('sdp1') + sdp_1.create(supporting_node_ids=[('te', 'ONT1')]) + sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'ONT1', '200')]) + sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'ONT1', '500')]) + + sdp_2 = simap_e2e.node('sdp2') + sdp_2.create(supporting_node_ids=[('te', 'POP1')]) + sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) + sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) + sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + + simap_e2e.link('E2E-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L1'), ('simap-aggnet', 'AggNet-L1')]) diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py index 40dcc8847..67803b092 100644 --- a/src/tests/tools/simap_server/simap_client/__main__.py +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -13,14 +13,16 @@ # limitations under the License. 
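
In the helper above, the supporting_node_ids and supporting_termination_point_ids tuples express RFC 8345 layering between the SIMAP views. As a sketch (attribute names per ietf-network / ietf-network-topology; not taken from this patch), 'site1' in 'simap-trans' supported by 'PE1' in 'te' would roughly serialize as:

    node = {
        'node-id': 'site1',
        'supporting-node': [{'network-ref': 'te', 'node-ref': 'PE1'}],
    }
    termination_point = {
        'tp-id': '500',
        'supporting-termination-point': [
            {'network-ref': 'te', 'node-ref': 'PE1', 'tp-ref': '500'},
        ],
    }
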
-import json, logging +import json, logging, time from common.tools.rest_conf.client.RestConfClient import RestConfClient from .SimapClient import SimapClient +from .Tools import create_simap_aggnet, create_simap_e2enet, create_simap_te, create_simap_trans + logging.basicConfig(level=logging.INFO) +logging.getLogger('RestConfClient').setLevel(logging.WARN) LOGGER = logging.getLogger(__name__) -logging.getLogger('RestConfClient').setLevel(logging.WARN) def main() -> None: restconf_client = RestConfClient( @@ -29,103 +31,30 @@ def main() -> None: ) simap_client = SimapClient(restconf_client) - te_topo = simap_client.network('te') - te_topo.create() - - te_topo.node('ONT1').create(termination_point_ids=['200', '500']) - te_topo.node('ONT2').create(termination_point_ids=['200', '500']) - te_topo.node('OLT' ).create(termination_point_ids=['200', '201', '500', '501']) - te_topo.link('L1').create('ONT1', '500', 'OLT', '200') - te_topo.link('L2').create('ONT2', '500', 'OLT', '201') - - te_topo.node('PE1').create(termination_point_ids=['200', '500', '501']) - te_topo.node('P1' ).create(termination_point_ids=['500', '501']) - te_topo.node('P2' ).create(termination_point_ids=['500', '501']) - te_topo.node('PE2').create(termination_point_ids=['200', '500', '501']) - te_topo.link('L5' ).create('PE1', '500', 'P1', '500') - te_topo.link('L6' ).create('PE1', '501', 'P2', '500') - te_topo.link('L9' ).create('P1', '501', 'PE2', '500') - te_topo.link('L10').create('P2', '501', 'PE2', '501') - - te_topo.node('OA' ).create(termination_point_ids=['200', '500', '501']) - te_topo.node('OTN1').create(termination_point_ids=['500', '501']) - te_topo.node('OTN2').create(termination_point_ids=['500', '501']) - te_topo.node('OE' ).create(termination_point_ids=['200', '500', '501']) - te_topo.link('L7' ).create('OA', '500', 'OTN1', '500') - te_topo.link('L8' ).create('OA', '501', 'OTN2', '500') - te_topo.link('L11' ).create('OTN1', '501', 'OE', '500') - te_topo.link('L12' ).create('OTN2', '501', 'OE', '501') - - te_topo.link('L3').create('OLT', '500', 'PE1', '200') - te_topo.link('L4').create('OLT', '501', 'OA', '200') - - te_topo.node('POP1').create(termination_point_ids=['200', '201', '500']) - te_topo.link('L13').create('PE2', '200', 'POP1', '500') - - te_topo.node('POP2').create(termination_point_ids=['200', '201', '500']) - te_topo.link('L14').create('OE', '200', 'POP2', '500') - - - - simap_trans = simap_client.network('simap-trans') - simap_trans.create(supporting_network_ids=['te']) - - site_1 = simap_trans.node('site1') - site_1.create(supporting_node_ids=[('te', 'PE1')]) - site_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE1', '200')]) - site_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE1', '500')]) - site_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE1', '501')]) - - site_2 = simap_trans.node('site2') - site_2.create(supporting_node_ids=[('te', 'PE2')]) - site_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'PE2', '200')]) - site_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'PE2', '500')]) - site_2.termination_point('501').create(supporting_termination_point_ids=[('te', 'PE2', '501')]) - - simap_trans.link('Trans-L1').create('site1', '500', 'site2', '500', supporting_link_ids=[('te', 'L5'), ('te', 'L9')]) - - - - - simap_aggnet = simap_client.network('simap-aggnet') - simap_aggnet.create(supporting_network_ids=['te', 'simap-trans']) - - sdp_1 = 
simap_aggnet.node('sdp1') - sdp_1.create(supporting_node_ids=[('te', 'OLT')]) - sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'OLT', '200')]) - sdp_1.termination_point('201').create(supporting_termination_point_ids=[('te', 'OLT', '201')]) - sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'OLT', '500')]) - sdp_1.termination_point('501').create(supporting_termination_point_ids=[('te', 'OLT', '501')]) - - sdp_2 = simap_aggnet.node('sdp2') - sdp_2.create(supporting_node_ids=[('te', 'POP1')]) - sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) - sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) - sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) - - simap_aggnet.link('AggNet-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L3'), ('simap-trans', 'Trans-L1'), ('te', 'L13')]) - - + create_simap_te(simap_client) + create_simap_trans(simap_client) + create_simap_aggnet(simap_client) + create_simap_e2enet(simap_client) + print('networks=', json.dumps(simap_client.networks())) - simap_e2e = simap_client.network('simap-e2e') - simap_e2e.create(supporting_network_ids=['te', 'simap-trans']) + trans_link = simap_client.network('simap-trans').link('Trans-L1') + trans_node_site1 = simap_client.network('simap-trans').node('site1') + trans_node_site2 = simap_client.network('simap-trans').node('site2') - sdp_1 = simap_e2e.node('sdp1') - sdp_1.create(supporting_node_ids=[('te', 'ONT1')]) - sdp_1.termination_point('200').create(supporting_termination_point_ids=[('te', 'ONT1', '200')]) - sdp_1.termination_point('500').create(supporting_termination_point_ids=[('te', 'ONT1', '500')]) + related_service_ids = ['trans-svc1', 'trans-svc2', 'trans-svc3'] - sdp_2 = simap_e2e.node('sdp2') - sdp_2.create(supporting_node_ids=[('te', 'POP1')]) - sdp_2.termination_point('200').create(supporting_termination_point_ids=[('te', 'POP1', '200')]) - sdp_2.termination_point('201').create(supporting_termination_point_ids=[('te', 'POP1', '201')]) - sdp_2.termination_point('500').create(supporting_termination_point_ids=[('te', 'POP1', '500')]) + for i in range(1000): + trans_link.telemetry.update(float(i), float(i), related_service_ids=related_service_ids) + trans_node_site1.telemetry.update(float(i), related_service_ids=related_service_ids) + trans_node_site2.telemetry.update(float(i), related_service_ids=related_service_ids) - simap_e2e.link('E2E-L1').create('sdp1', '500', 'sdp2', '500', supporting_link_ids=[('te', 'L1'), ('simap-aggnet', 'AggNet-L1')]) + print('trans link telemetry =', json.dumps(trans_link.telemetry.get())) + print('trans site1 telemetry =', json.dumps(trans_node_site1.telemetry.get())) + print('trans site2 telemetry =', json.dumps(trans_node_site2.telemetry.get())) + time.sleep(10) - print('networks=', json.dumps(simap_client.networks())) if __name__ == '__main__': main() -- GitLab From b4c1e700695ca6da4f9969fd898d8b23976080ef Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 10:33:36 +0000 Subject: [PATCH 255/367] Test - Tools - Mock NCE-T /NCE-FAN Ctrl - Updated SIMAP Client --- .../nce_fan_ctrl/SimapClient.py | 110 +++++++++++++++++- .../mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py | 110 +++++++++++++++++- 2 files changed, 218 insertions(+), 2 deletions(-) diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py 
b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py index 8f457d452..a300aca74 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py @@ -13,7 +13,7 @@ # limitations under the License. -from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.tools.rest_conf.client.RestConfClient import RestConfClient @@ -62,6 +62,51 @@ class TerminationPoint: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) + +class NodeTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + + def create( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + + class Node: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' @@ -71,6 +116,13 @@ class Node: self._network_id = network_id self._node_id = node_id self._tps : Dict[str, TerminationPoint] = dict() + self._telemetry : Optional[NodeTelemetry] = None + + @property + def telemetry(self) -> NodeTelemetry: + if self._telemetry is None: + self._telemetry = NodeTelemetry(self._restconf_client, self._network_id, self._node_id) + return self._telemetry def termination_points(self) -> List[Dict]: tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) @@ -119,6 +171,55 @@ class Node: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) + +class LinkTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, 
bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': bandwidth_utilization, + 'latency' : latency, + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + class Link: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' @@ -127,6 +228,13 @@ class Link: self._restconf_client = restconf_client self._network_id = network_id self._link_id = link_id + self._telemetry : Optional[LinkTelemetry] = None + + @property + def telemetry(self) -> LinkTelemetry: + if self._telemetry is None: + self._telemetry = LinkTelemetry(self._restconf_client, self._network_id, self._link_id) + return self._telemetry def create( self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py index 8f457d452..a300aca74 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py @@ -13,7 +13,7 @@ # limitations under the License. 
-from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.tools.rest_conf.client.RestConfClient import RestConfClient @@ -62,6 +62,51 @@ class TerminationPoint: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) + +class NodeTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + + def create( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + + class Node: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' @@ -71,6 +116,13 @@ class Node: self._network_id = network_id self._node_id = node_id self._tps : Dict[str, TerminationPoint] = dict() + self._telemetry : Optional[NodeTelemetry] = None + + @property + def telemetry(self) -> NodeTelemetry: + if self._telemetry is None: + self._telemetry = NodeTelemetry(self._restconf_client, self._network_id, self._node_id) + return self._telemetry def termination_points(self) -> List[Dict]: tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) @@ -119,6 +171,55 @@ class Node: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) + +class LinkTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': bandwidth_utilization, + 'latency' : latency, + } + if 
len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + class Link: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' @@ -127,6 +228,13 @@ class Link: self._restconf_client = restconf_client self._network_id = network_id self._link_id = link_id + self._telemetry : Optional[LinkTelemetry] = None + + @property + def telemetry(self) -> LinkTelemetry: + if self._telemetry is None: + self._telemetry = LinkTelemetry(self._restconf_client, self._network_id, self._link_id) + return self._telemetry def create( self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, -- GitLab From cea36dad60e77299402025b47714ccdca3a5ba79 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 10:34:01 +0000 Subject: [PATCH 256/367] SIMAP Connector: - Updated SIMAP Client --- .../service/simap_updater/SimapClient.py | 110 +++++++++++++++++- 1 file changed, 109 insertions(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapClient.py b/src/simap_connector/service/simap_updater/SimapClient.py index 8f457d452..a300aca74 100644 --- a/src/simap_connector/service/simap_updater/SimapClient.py +++ b/src/simap_connector/service/simap_updater/SimapClient.py @@ -13,7 +13,7 @@ # limitations under the License. 
-from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.tools.rest_conf.client.RestConfClient import RestConfClient @@ -62,6 +62,51 @@ class TerminationPoint: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) + +class NodeTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + + def create( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + + class Node: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' @@ -71,6 +116,13 @@ class Node: self._network_id = network_id self._node_id = node_id self._tps : Dict[str, TerminationPoint] = dict() + self._telemetry : Optional[NodeTelemetry] = None + + @property + def telemetry(self) -> NodeTelemetry: + if self._telemetry is None: + self._telemetry = NodeTelemetry(self._restconf_client, self._network_id, self._node_id) + return self._telemetry def termination_points(self) -> List[Dict]: tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) @@ -119,6 +171,55 @@ class Node: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) + +class LinkTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': bandwidth_utilization, + 'latency' : latency, + } + if 
len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + class Link: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' @@ -127,6 +228,13 @@ class Link: self._restconf_client = restconf_client self._network_id = network_id self._link_id = link_id + self._telemetry : Optional[LinkTelemetry] = None + + @property + def telemetry(self) -> LinkTelemetry: + if self._telemetry is None: + self._telemetry = LinkTelemetry(self._restconf_client, self._network_id, self._link_id) + return self._telemetry def create( self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, -- GitLab From 43082b902819641f20da66150a9fc356a490009b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 10:34:15 +0000 Subject: [PATCH 257/367] NBI component - SSE Telemetry: - Updated SIMAP Client --- src/nbi/service/sse_telemetry/SimapClient.py | 110 ++++++++++++++++++- 1 file changed, 109 insertions(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/SimapClient.py b/src/nbi/service/sse_telemetry/SimapClient.py index 8f457d452..a300aca74 100644 --- a/src/nbi/service/sse_telemetry/SimapClient.py +++ b/src/nbi/service/sse_telemetry/SimapClient.py @@ -13,7 +13,7 @@ # limitations under the License. 
-from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.tools.rest_conf.client.RestConfClient import RestConfClient @@ -62,6 +62,51 @@ class TerminationPoint: endpoint = TerminationPoint.ENDPOINT_ID.format(self._network_id, self._node_id, self._tp_id) self._restconf_client.delete(endpoint) + +class NodeTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._node_id = node_id + + def create( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, cpu_utilization : float, related_service_ids : List[str] = [] + ) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + telemetry = { + 'cpu-utilization': '{:.2f}'.format(cpu_utilization), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + node = {'node-id': self._node_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'node': [node]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = NodeTelemetry.ENDPOINT.format(self._network_id, self._node_id) + self._restconf_client.delete(endpoint) + + class Node: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/node={:s}' @@ -71,6 +116,13 @@ class Node: self._network_id = network_id self._node_id = node_id self._tps : Dict[str, TerminationPoint] = dict() + self._telemetry : Optional[NodeTelemetry] = None + + @property + def telemetry(self) -> NodeTelemetry: + if self._telemetry is None: + self._telemetry = NodeTelemetry(self._restconf_client, self._network_id, self._node_id) + return self._telemetry def termination_points(self) -> List[Dict]: tps : Dict = self._restconf_client.get(TerminationPoint.ENDPOINT_NO_ID) @@ -119,6 +171,55 @@ class Node: endpoint = Node.ENDPOINT_ID.format(self._network_id, self._node_id) self._restconf_client.delete(endpoint) + +class LinkTelemetry: + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + + def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): + self._restconf_client = restconf_client + self._network_id = network_id + self._link_id = link_id + + def create( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': bandwidth_utilization, + 'latency' : latency, + } + if 
len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.post(endpoint, payload) + + def get(self) -> Dict: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry : Dict = self._restconf_client.get(endpoint) + return telemetry + + def update( + self, bandwidth_utilization : float, latency : float, + related_service_ids : List[str] = [] + ) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + telemetry = { + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), + } + if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) + + def delete(self) -> None: + endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) + self._restconf_client.delete(endpoint) + + class Link: ENDPOINT_NO_ID = '/ietf-network:networks/network={:s}' ENDPOINT_ID = ENDPOINT_NO_ID + '/ietf-network-topology:link={:s}' @@ -127,6 +228,13 @@ class Link: self._restconf_client = restconf_client self._network_id = network_id self._link_id = link_id + self._telemetry : Optional[LinkTelemetry] = None + + @property + def telemetry(self) -> LinkTelemetry: + if self._telemetry is None: + self._telemetry = LinkTelemetry(self._restconf_client, self._network_id, self._link_id) + return self._telemetry def create( self, src_node_id : str, src_tp_id : str, dst_node_id : str, dst_tp_id : str, -- GitLab From 7633957c6e8a18ab63623a0fb77ae49f28898d4e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 11:50:44 +0000 Subject: [PATCH 258/367] SIMAP Connector: - Implemented Logic for Synthetic Samplers and Telemetry Workers --- .../service/simap_updater/SimapUpdater.py | 6 +- .../service/telemetry/Resources.py | 62 +++++++++++++ .../service/telemetry/SyntheticSamplers.py | 87 +++++++++++++++++++ .../service/telemetry/TelemetryPool.py | 17 +++- .../service/telemetry/TelemetryWorker.py | 29 ++++--- 5 files changed, 186 insertions(+), 15 deletions(-) create mode 100644 src/simap_connector/service/telemetry/Resources.py create mode 100644 src/simap_connector/service/telemetry/SyntheticSamplers.py diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 7253c0139..24fd3fdbd 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -30,6 +30,7 @@ from simap_connector.Config import ( SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap +from simap_connector.service.telemetry.Resources import Resources from simap_connector.service.telemetry.TelemetryPool import TelemetryPool from .SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache @@ -511,7 +512,10 @@ class EventDispatcher(BaseEventDispatcher): #dom_link = domain_topo.link(link_name) 
#dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) - self._telemetry_pool.start_worker(domain_name) + + resources = Resources() + sampling_interval = 1.0 + self._telemetry_pool.start_worker(domain_name, resources, sampling_interval) return True diff --git a/src/simap_connector/service/telemetry/Resources.py b/src/simap_connector/service/telemetry/Resources.py new file mode 100644 index 000000000..b41977603 --- /dev/null +++ b/src/simap_connector/service/telemetry/Resources.py @@ -0,0 +1,62 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass, field +from typing import List +from simap_connector.service.simap_updater.SimapClient import SimapClient +from simap_connector.service.telemetry.SyntheticSamplers import SyntheticSampler + + +@dataclass +class ResourceNode: + node_name : str + cpu_utilization_sampler : SyntheticSampler + related_service_ids : List[str] = field(default_factory=list) + + def generate_samples(self, simap_client : SimapClient, domain_name : str) -> None: + cpu_utilization = self.cpu_utilization_sampler.get_sample() + simap_node = simap_client.network(domain_name).node(self.node_name) + simap_node.telemetry.update( + cpu_utilization.value, related_service_ids=self.related_service_ids + ) + +@dataclass +class ResourceLink: + link_name : str + bandwidth_utilization_sampler : SyntheticSampler + latency_sampler : SyntheticSampler + related_service_ids : List[str] = field(default_factory=list) + + def generate_samples(self, simap_client : SimapClient, domain_name : str) -> None: + bandwidth_utilization = self.bandwidth_utilization_sampler.get_sample() + latency = self.latency_sampler.get_sample() + simap_link = simap_client.network(domain_name).link(self.link_name) + simap_link.telemetry.update( + bandwidth_utilization.value, latency.value, + related_service_ids=self.related_service_ids + ) + + +@dataclass +class Resources: + nodes : List[ResourceNode] = field(default_factory=list) + links : List[ResourceLink] = field(default_factory=list) + + def generate_samples(self, simap_client : SimapClient, domain_name : str) -> None: + for resource in self.nodes: + resource.generate_samples(simap_client, domain_name) + + for resource in self.links: + resource.generate_samples(simap_client, domain_name) diff --git a/src/simap_connector/service/telemetry/SyntheticSamplers.py b/src/simap_connector/service/telemetry/SyntheticSamplers.py new file mode 100644 index 000000000..c80d03694 --- /dev/null +++ b/src/simap_connector/service/telemetry/SyntheticSamplers.py @@ -0,0 +1,87 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import math, random, threading +from dataclasses import dataclass +from datetime import datetime +from typing import Dict + + +@dataclass +class Sample: + timestamp : float + value : float + + +@dataclass +class SyntheticSampler: + amplitude : float = 0.0 + phase : float = 0.0 + period : float = 1.0 + offset : float = 0.0 + noise_ratio : float = 0.0 + + @classmethod + def create_random( + cls, amplitude_scale : float, phase_scale : float, period_scale : float, + offset_scale : float, noise_ratio : float + ) -> 'SyntheticSampler': + amplitude = amplitude_scale * random.random() + phase = phase_scale * random.random() + period = period_scale * random.random() + offset = offset_scale * random.random() + amplitude + return cls(amplitude, phase, period, offset, noise_ratio) + + def get_sample(self) -> Sample: + timestamp = datetime.timestamp(datetime.utcnow()) + + waveform = math.sin(2 * math.pi * timestamp / self.period + self.phase) + waveform *= self.amplitude + waveform += self.offset + + noise = self.amplitude * random.random() + value = abs((1.0 - self.noise_ratio) * waveform + self.noise_ratio * noise) + + return Sample(timestamp, value) + + +class SyntheticSamplers: + def __init__(self) -> None: + self._lock = threading.Lock() + self._samplers : Dict[str, SyntheticSampler] = dict() + + def add_sampler( + self, sampler_name : str, amplitude_scale : float, phase_scale : float, + period_scale : float, offset_scale : float, noise_ratio : float + ) -> None: + with self._lock: + if sampler_name in self._samplers: + MSG = 'SyntheticSampler({:s}) already exists' + raise Exception(MSG.format(sampler_name)) + self._samplers[sampler_name] = SyntheticSampler.create_random( + amplitude_scale, phase_scale, period_scale, offset_scale, noise_ratio + ) + + def remove_sampler(self, sampler_name : str) -> None: + with self._lock: + self._samplers.pop(sampler_name, None) + + def get_sample(self, sampler_name : str) -> Sample: + with self._lock: + sampler = self._samplers.get(sampler_name) + if sampler_name not in self._samplers: + MSG = 'SyntheticSampler({:s}) does not exist' + raise Exception(MSG.format(sampler_name)) + return sampler.get_sample() diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index 808b2b468..b40bc96fe 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -15,6 +15,8 @@ import logging, threading from typing import Dict, Optional +from simap_connector.service.simap_updater.SimapClient import SimapClient +from .Resources import Resources from .TelemetryWorker import TelemetryWorker @@ -23,27 +25,34 @@ LOGGER = logging.getLogger(__name__) class TelemetryPool: def __init__( - self, terminate : Optional[threading.Event] = None + self, simap_client : SimapClient, terminate : Optional[threading.Event] = None ) -> None: + self._simap_client = simap_client self._workers : Dict[str, TelemetryWorker] = dict() self._lock = threading.Lock() self._terminate = threading.Event() if terminate is None else terminate - def 
start_worker(self, domain_name : str) -> None: + def start_worker( + self, domain_name : str, resources : Resources, sampling_interval : float + ) -> None: with self._lock: if domain_name in self._workers: MSG = '[start_worker] Worker already running for Domain({:s})' LOGGER.debug(MSG.format(str(domain_name))) return - worker = TelemetryWorker(domain_name, terminate=self._terminate) - self._workers[domain_name] = worker + worker = TelemetryWorker( + domain_name, self._simap_client, resources, sampling_interval, + terminate=self._terminate + ) worker.start() MSG = '[start_worker] Started worker for Domain({:s})' LOGGER.info(MSG.format(str(domain_name))) + self._workers[domain_name] = worker + def stop_worker(self, domain_name : str) -> None: with self._lock: diff --git a/src/simap_connector/service/telemetry/TelemetryWorker.py b/src/simap_connector/service/telemetry/TelemetryWorker.py index 9d04d7a3b..4fa88074e 100644 --- a/src/simap_connector/service/telemetry/TelemetryWorker.py +++ b/src/simap_connector/service/telemetry/TelemetryWorker.py @@ -15,6 +15,8 @@ import logging, threading, time from typing import Optional +from simap_connector.service.simap_updater.SimapClient import SimapClient +from .Resources import Resources LOGGER = logging.getLogger(__name__) @@ -22,38 +24,45 @@ LOGGER = logging.getLogger(__name__) class TelemetryWorker(threading.Thread): def __init__( - self, domain_name : str, terminate : Optional[threading.Event] = None + self, domain_name : str, simap_client : SimapClient, resources : Resources, + sampling_interval : float, terminate : Optional[threading.Event] = None ) -> None: - name = 'TelemetryWorker-{:s}'.format(str(domain_name)) + name = 'TelemetryWorker({:s})'.format(str(domain_name)) super().__init__(name=name, daemon=True) - self.domain_name = domain_name + self._domain_name = domain_name + self._simap_client = simap_client + self._resources = resources + self._sampling_interval = sampling_interval self._stop_event = threading.Event() self._terminate = threading.Event() if terminate is None else terminate def stop(self) -> None: MSG = '[stop][{:s}] Stopping...' - LOGGER.info(MSG.format(str(self.domain_name))) + LOGGER.info(MSG.format(str(self._domain_name))) self._stop_event.set() self.join() def run(self) -> None: MSG = '[run][{:s}] Starting...' - LOGGER.info(MSG.format(str(self.domain_name))) + LOGGER.info(MSG.format(str(self._domain_name))) try: while not self._stop_event.is_set() and not self._terminate.is_set(): + MSG = '[run][{:s}] Sampling...' 
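
A quick numeric check of the waveform produced by SyntheticSampler.get_sample() added earlier in this patch, with noise_ratio set to 0 and assumed parameters: at a quarter of the period the sine term equals 1, so the sample value is amplitude + offset.

    import math
    amplitude, phase, period, offset = 2.0, 0.0, 60.0, 10.0
    t = 15.0                                    # period / 4  ->  sin(pi/2) == 1.0
    waveform = amplitude * math.sin(2 * math.pi * t / period + phase) + offset
    assert abs(waveform - 12.0) < 1e-9          # crest value: amplitude + offset
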
+ LOGGER.info(MSG.format(str(self._domain_name))) - MSG = '[run][{:s}] Heartbeat' - LOGGER.info(MSG.format(str(self.domain_name))) + self._resources.generate_samples(self._simap_client, self._domain_name) - for _ in range(10): + # Make wait responsible to terminations + iterations = self._sampling_interval / 0.1 + for _ in range(iterations): if self._stop_event.is_set(): break if self._terminate.is_set() : break time.sleep(0.1) except Exception: MSG = '[run][{:s}] Unhandled Exception' - LOGGER.info(MSG.format(str(self.domain_name))) + LOGGER.info(MSG.format(str(self._domain_name))) finally: MSG = '[run][{:s}] Terminated' - LOGGER.info(MSG.format(str(self.domain_name))) + LOGGER.info(MSG.format(str(self._domain_name))) -- GitLab From 48698c8df096a0270cbfbabd46822dbbb0d6b0bc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 12:03:49 +0000 Subject: [PATCH 259/367] SIMAP Connector: - Fixed parameters of TelemetryPool and SimapUpdater --- src/simap_connector/service/__main__.py | 18 +++++++++++-- .../service/simap_updater/SimapUpdater.py | 27 +++++++------------ 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 9ef0700f2..1eb5fa819 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -14,15 +14,22 @@ import logging, signal, sys, threading from prometheus_client import start_http_server +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables ) +from simap_connector.Config import ( + SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, + SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, +) +from .simap_updater.SimapClient import SimapClient from .simap_updater.SimapUpdater import SimapUpdater from .telemetry.TelemetryPool import TelemetryPool from .SimapConnectorService import SimapConnectorService + TERMINATE = threading.Event() LOG_LEVEL = get_log_level() @@ -56,8 +63,15 @@ def main(): grpc_service = SimapConnectorService() grpc_service.start() - telemetry_pool = TelemetryPool(terminate=TERMINATE) - simap_updater = SimapUpdater(TERMINATE, telemetry_pool) + + restconf_client = RestConfClient( + scheme=SIMAP_SERVER_SCHEME, address=SIMAP_SERVER_ADDRESS, + port=SIMAP_SERVER_PORT, username=SIMAP_SERVER_USERNAME, + password=SIMAP_SERVER_PASSWORD, + ) + simap_client = SimapClient(restconf_client) + telemetry_pool = TelemetryPool(simap_client, terminate=TERMINATE) + simap_updater = SimapUpdater(simap_client, telemetry_pool, TERMINATE) simap_updater.start() LOGGER.info('Running...') diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 24fd3fdbd..cb8e5da2a 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -23,22 +23,16 @@ from common.proto.context_pb2 import ( from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string -from common.tools.rest_conf.client.RestConfClient import RestConfClient from context.client.ContextClient import ContextClient -from simap_connector.Config import ( - 
SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, - SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, -) from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap from simap_connector.service.telemetry.Resources import Resources from simap_connector.service.telemetry.TelemetryPool import TelemetryPool -from .SimapClient import SimapClient from .ObjectCache import CachedEntities, ObjectCache +from .SimapClient import SimapClient from .Tools import get_device_endpoint, get_link_endpoint, get_service_endpoint LOGGER = logging.getLogger(__name__) -RESTCONF_LOGGER = logging.getLogger(__name__ + '.RestConfClient') SKIPPED_DEVICE_TYPES = { @@ -52,21 +46,16 @@ SKIPPED_DEVICE_TYPES = { class EventDispatcher(BaseEventDispatcher): def __init__( self, events_queue : queue.PriorityQueue, + simap_client : SimapClient, context_client : ContextClient, telemetry_pool : TelemetryPool, terminate : Optional[threading.Event] = None ) -> None: super().__init__(events_queue, terminate) + self._simap_client = simap_client self._context_client = context_client self._telemetry_pool = telemetry_pool self._object_cache = ObjectCache(self._context_client) - self._restconf_client = RestConfClient( - scheme=SIMAP_SERVER_SCHEME, address=SIMAP_SERVER_ADDRESS, - port=SIMAP_SERVER_PORT, username=SIMAP_SERVER_USERNAME, - password=SIMAP_SERVER_PASSWORD, logger=RESTCONF_LOGGER, - ) - self._simap_client = SimapClient(self._restconf_client) - self._skipped_devices : Set[str] = set() @@ -618,7 +607,11 @@ class EventDispatcher(BaseEventDispatcher): class SimapUpdater: - def __init__(self, terminate : threading.Event, telemetry_pool : TelemetryPool) -> None: + def __init__( + self, simap_client : SimapClient, telemetry_pool : TelemetryPool, + terminate : threading.Event + ) -> None: + self._simap_client = simap_client self._telemetry_pool = telemetry_pool self._context_client = ContextClient() @@ -628,8 +621,8 @@ class SimapUpdater: ) self._event_dispatcher = EventDispatcher( - self._event_collector.get_events_queue(), self._context_client, - self._telemetry_pool, terminate=terminate + self._event_collector.get_events_queue(), self._simap_client, + self._context_client, self._telemetry_pool, terminate=terminate ) def start(self) -> None: -- GitLab From bbf9e2ebccd5460ea4640a44e11a65d9f5852291 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:13:41 +0000 Subject: [PATCH 260/367] SIMAP Connector: - Implemented creation/removal of workers for basic links --- .../service/simap_updater/SimapUpdater.py | 31 +++++++++++++++++-- .../service/telemetry/Resources.py | 16 +++++----- .../service/telemetry/TelemetryPool.py | 31 ++++++++++--------- .../service/telemetry/TelemetryWorker.py | 18 +++++------ 4 files changed, 64 insertions(+), 32 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index cb8e5da2a..07ddd4319 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -25,7 +25,7 @@ from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap -from simap_connector.service.telemetry.Resources import Resources +from simap_connector.service.telemetry.Resources import ResourceLink, Resources, 
SyntheticSampler from simap_connector.service.telemetry.TelemetryPool import TelemetryPool from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient @@ -335,6 +335,30 @@ class EventDispatcher(BaseEventDispatcher): te_link = te_topo.link(link_name) te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) + + worker_name = '{:s}:{:s}'.format(topology_name, link_name) + resources = Resources() + resources.links.append(ResourceLink( + domain_name=topology_name, link_name=link_name, + bandwidth_utilization_sampler=SyntheticSampler.create_random( + amplitude_scale = 2.0, + phase_scale = 1e-7, + period_scale = 86_400, + offset_scale = 10_000_000, + noise_ratio = 0.05, + ), + latency_sampler=SyntheticSampler.create_random( + amplitude_scale = 0.5, + phase_scale = 1e-7, + period_scale = 60.0, + offset_scale = 10.0, + noise_ratio = 0.05, + ), + related_service_ids=[], + )) + sampling_interval = 1.0 + self._telemetry_pool.start_worker(worker_name, resources, sampling_interval) + return True @@ -400,7 +424,10 @@ class EventDispatcher(BaseEventDispatcher): self._object_cache.delete(CachedEntities.LINK, link_uuid) self._object_cache.delete(CachedEntities.LINK, link_name) - MSG = 'Link Remove: {:s}' + worker_name = '{:s}:{:s}'.format(topology_name, link_name) + self._telemetry_pool.stop_worker(worker_name) + + MSG = 'Link Removed: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) diff --git a/src/simap_connector/service/telemetry/Resources.py b/src/simap_connector/service/telemetry/Resources.py index b41977603..4e81cc4fe 100644 --- a/src/simap_connector/service/telemetry/Resources.py +++ b/src/simap_connector/service/telemetry/Resources.py @@ -21,28 +21,30 @@ from simap_connector.service.telemetry.SyntheticSamplers import SyntheticSampler @dataclass class ResourceNode: + domain_name : str node_name : str cpu_utilization_sampler : SyntheticSampler related_service_ids : List[str] = field(default_factory=list) - def generate_samples(self, simap_client : SimapClient, domain_name : str) -> None: + def generate_samples(self, simap_client : SimapClient) -> None: cpu_utilization = self.cpu_utilization_sampler.get_sample() - simap_node = simap_client.network(domain_name).node(self.node_name) + simap_node = simap_client.network(self.domain_name).node(self.node_name) simap_node.telemetry.update( cpu_utilization.value, related_service_ids=self.related_service_ids ) @dataclass class ResourceLink: + domain_name : str link_name : str bandwidth_utilization_sampler : SyntheticSampler latency_sampler : SyntheticSampler related_service_ids : List[str] = field(default_factory=list) - def generate_samples(self, simap_client : SimapClient, domain_name : str) -> None: + def generate_samples(self, simap_client : SimapClient) -> None: bandwidth_utilization = self.bandwidth_utilization_sampler.get_sample() latency = self.latency_sampler.get_sample() - simap_link = simap_client.network(domain_name).link(self.link_name) + simap_link = simap_client.network(self.domain_name).link(self.link_name) simap_link.telemetry.update( bandwidth_utilization.value, latency.value, related_service_ids=self.related_service_ids @@ -54,9 +56,9 @@ class Resources: nodes : List[ResourceNode] = field(default_factory=list) links : List[ResourceLink] = field(default_factory=list) - def generate_samples(self, simap_client : SimapClient, domain_name : str) -> None: + def generate_samples(self, simap_client : SimapClient) -> None: for resource in self.nodes: - 
resource.generate_samples(simap_client, domain_name) + resource.generate_samples(simap_client) for resource in self.links: - resource.generate_samples(simap_client, domain_name) + resource.generate_samples(simap_client) diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index b40bc96fe..c8ee4247b 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -32,41 +32,44 @@ class TelemetryPool: self._lock = threading.Lock() self._terminate = threading.Event() if terminate is None else terminate + def has_worker(self, worker_name : str) -> bool: + with self._lock: + return worker_name in self._workers def start_worker( - self, domain_name : str, resources : Resources, sampling_interval : float + self, worker_name : str, resources : Resources, sampling_interval : float ) -> None: with self._lock: - if domain_name in self._workers: - MSG = '[start_worker] Worker already running for Domain({:s})' - LOGGER.debug(MSG.format(str(domain_name))) + if worker_name in self._workers: + MSG = '[start_worker] Worker({:s}) already exists' + LOGGER.debug(MSG.format(str(worker_name))) return worker = TelemetryWorker( - domain_name, self._simap_client, resources, sampling_interval, + worker_name, self._simap_client, resources, sampling_interval, terminate=self._terminate ) worker.start() - MSG = '[start_worker] Started worker for Domain({:s})' - LOGGER.info(MSG.format(str(domain_name))) + MSG = '[start_worker] Started Worker({:s})' + LOGGER.info(MSG.format(str(worker_name))) - self._workers[domain_name] = worker + self._workers[worker_name] = worker - def stop_worker(self, domain_name : str) -> None: + def stop_worker(self, worker_name : str) -> None: with self._lock: - worker = self._workers.pop(domain_name, None) + worker = self._workers.pop(worker_name, None) if worker is None: - MSG = '[stop_worker] No worker found for Domain({:s})' - LOGGER.debug(MSG.format(str(domain_name))) + MSG = '[stop_worker] Worker({:s}) not found' + LOGGER.debug(MSG.format(str(worker_name))) return worker.stop() - MSG = '[stop_worker] Stopped worker for Domain({:s})' - LOGGER.info(MSG.format(str(domain_name))) + MSG = '[stop_worker] Stopped Worker({:s})' + LOGGER.info(MSG.format(str(worker_name))) def stop_all(self) -> None: diff --git a/src/simap_connector/service/telemetry/TelemetryWorker.py b/src/simap_connector/service/telemetry/TelemetryWorker.py index 4fa88074e..31df79c7b 100644 --- a/src/simap_connector/service/telemetry/TelemetryWorker.py +++ b/src/simap_connector/service/telemetry/TelemetryWorker.py @@ -24,12 +24,12 @@ LOGGER = logging.getLogger(__name__) class TelemetryWorker(threading.Thread): def __init__( - self, domain_name : str, simap_client : SimapClient, resources : Resources, + self, worker_name : str, simap_client : SimapClient, resources : Resources, sampling_interval : float, terminate : Optional[threading.Event] = None ) -> None: - name = 'TelemetryWorker({:s})'.format(str(domain_name)) + name = 'TelemetryWorker({:s})'.format(str(worker_name)) super().__init__(name=name, daemon=True) - self._domain_name = domain_name + self._worker_name = worker_name self._simap_client = simap_client self._resources = resources self._sampling_interval = sampling_interval @@ -38,20 +38,20 @@ class TelemetryWorker(threading.Thread): def stop(self) -> None: MSG = '[stop][{:s}] Stopping...' 
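Note on the synthetic telemetry used by this patch: each link now gets its own TelemetryWorker (named '<topology>:<link>'), fed by samplers built through SyntheticSampler.create_random(). SyntheticSamplers.py itself is not part of this series; the following is only an illustrative sketch of such a sinusoid-plus-noise sampler, with parameter names taken from the calls above and a Sample wrapper matching the .value access seen in Resources.py. The real implementation may differ.

    import math, random, time
    from dataclasses import dataclass

    @dataclass
    class Sample:
        value : float

    @dataclass
    class SyntheticSampler:
        amplitude : float   # peak deviation around the offset
        phase     : float   # phase shift, in radians
        period    : float   # sinusoid period, in seconds
        offset    : float   # baseline value
        noise     : float   # absolute amplitude of the random noise

        @classmethod
        def create_random(
            cls, amplitude_scale : float, phase_scale : float, period_scale : float,
            offset_scale : float, noise_ratio : float
        ) -> 'SyntheticSampler':
            # Draw each parameter up to its scale; the noise amplitude is a ratio of the signal amplitude.
            amplitude = random.uniform(0.5, 1.0) * amplitude_scale
            return cls(
                amplitude = amplitude,
                phase     = random.uniform(0.0, 2.0 * math.pi) * phase_scale,
                period    = random.uniform(0.5, 1.0) * period_scale,
                offset    = random.uniform(0.5, 1.0) * offset_scale,
                noise     = amplitude * noise_ratio,
            )

        def get_sample(self) -> Sample:
            value  = self.offset
            value += self.amplitude * math.sin(2.0 * math.pi * time.time() / self.period + self.phase)
            value += random.uniform(-self.noise, self.noise)
            return Sample(value=value)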
- LOGGER.info(MSG.format(str(self._domain_name))) + LOGGER.info(MSG.format(str(self._worker_name))) self._stop_event.set() self.join() def run(self) -> None: MSG = '[run][{:s}] Starting...' - LOGGER.info(MSG.format(str(self._domain_name))) + LOGGER.info(MSG.format(str(self._worker_name))) try: while not self._stop_event.is_set() and not self._terminate.is_set(): MSG = '[run][{:s}] Sampling...' - LOGGER.info(MSG.format(str(self._domain_name))) + LOGGER.info(MSG.format(str(self._worker_name))) - self._resources.generate_samples(self._simap_client, self._domain_name) + self._resources.generate_samples(self._simap_client) # Make wait responsible to terminations iterations = self._sampling_interval / 0.1 @@ -62,7 +62,7 @@ class TelemetryWorker(threading.Thread): except Exception: MSG = '[run][{:s}] Unhandled Exception' - LOGGER.info(MSG.format(str(self._domain_name))) + LOGGER.info(MSG.format(str(self._worker_name))) finally: MSG = '[run][{:s}] Terminated' - LOGGER.info(MSG.format(str(self._domain_name))) + LOGGER.info(MSG.format(str(self._worker_name))) -- GitLab From 27c6cf4036f2cd9f1572e7f1d5c09584261a9767 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:17:00 +0000 Subject: [PATCH 261/367] ECOC F5GA Telemetry Demo: - Fixed slice2 VLAN tags - Fixed VLANs and IP addresses in topology descriptors --- .../data/slices/network-slice2.json | 2 +- .../data/topology/topology-agg.json | 6 ++--- .../data/topology/topology-ip.json | 22 +++++++++---------- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index d1030e8ec..f0875e25e 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -12,7 +12,7 @@ "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json index 7231696d6..c761a86dd 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-agg.json @@ -45,12 +45,11 @@ }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { "uuid": "201", "name": "201", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { "uuid": "500", "name": "500", "type": "optical", - "address_ip": "172.10.33.2", "address_prefix": "24", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 101, "site_location": "transport", "mtu": "1500" }}} ]}}, @@ -72,12 +71,11 @@ }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { "uuid": "201", "name": "201", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}}, 
{"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { "uuid": "500", "name": "500", "type": "optical", - "address_ip": "172.10.44.2", "address_prefix": "24", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 201, "site_location": "transport", "mtu": "1500" }}} ]}} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json index d4b72d428..cd7720160 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/topology/topology-ip.json @@ -21,17 +21,17 @@ ]}}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { "uuid": "200", "name": "200", "type": "optical", - "address_ip": "128.32.44.254", "address_prefix": "24", + "address_ip": "128.32.44.254", "address_prefix": "24", "vlan_tag": 21, "site_location": "access", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { "uuid": "500", "name": "500", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.1.1", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { "uuid": "501", "name": "501", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.2.1", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}} ]} @@ -49,12 +49,12 @@ ]}}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { "uuid": "500", "name": "500", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.1.2", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { "uuid": "501", "name": "501", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.3.2", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}} ]} @@ -72,12 +72,12 @@ ]}}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { "uuid": "500", "name": "500", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.2.2", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { "uuid": "501", "name": "501", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.4.2", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}} ]} @@ -96,17 +96,17 @@ ]}}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { "uuid": "200", "name": "200", "type": "optical", - "address_ip": "172.10.44.254", "address_prefix": "24", - "site_location": "cloud", "mtu": "1500", "ce-ip": "172.10.44.2" + "address_ip": "172.10.44.254", "address_prefix": "24", "vlan_tag": 201, + "site_location": "cloud", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { "uuid": "500", "name": "500", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + 
"address_ip": "10.44.3.1", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { "uuid": "501", "name": "501", "type": "optical", - "address_ip": "0.0.0.0", "address_prefix": "24", + "address_ip": "10.44.4.1", "address_prefix": "24", "site_location": "transport", "mtu": "1500" }}} ]} -- GitLab From dae4477285972aa5f423812ce96eeff67ff32aef Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:17:24 +0000 Subject: [PATCH 262/367] Test - Tools - Mock NCE-T /NCE-FAN Ctrl - Updated startup files with VLAN tags --- .../tools/mock_nce_fan_ctrl/startup.json | 24 +++++++++----- src/tests/tools/mock_nce_t_ctrl/startup.json | 31 ++++++++++++------- 2 files changed, 36 insertions(+), 19 deletions(-) diff --git a/src/tests/tools/mock_nce_fan_ctrl/startup.json b/src/tests/tools/mock_nce_fan_ctrl/startup.json index 5c70a2da7..774efb417 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/startup.json +++ b/src/tests/tools/mock_nce_fan_ctrl/startup.json @@ -16,26 +16,34 @@ "node-id": "ONT1", "ietf-te-topology:te-node-id": "172.16.61.10", "ietf-te-topology:te": {"te-node-attributes": {"name": "ONT1", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "10.1.1.2"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "10.0.0.1"} ] }, { "node-id": "ONT2", "ietf-te-topology:te-node-id": "172.16.61.11", "ietf-te-topology:te": {"te-node-attributes": {"name": "ONT2", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "10.1.2.2"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "10.0.0.1"} ] }, { "node-id": "OLT", "ietf-te-topology:te-node-id": "172.16.58.10", "ietf-te-topology:te": {"te-node-attributes": {"name": "OLT", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "201", "ietf-te-topology:te": {"name": "201"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "128.32.33.2", + "ietf-eth-te-topology:eth-svc": {"supported-classification": {"port-classification": false, "vlan-classification": {"vlan-tag-classification": true, "outer-tag": { + "supported-tag-types": ["ietf-eth-tran-types:classify-c-vlan"], "vlan-bundling": false, "vlan-range": "31" + }}}} + }, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "128.32.44.2", + "ietf-eth-te-topology:eth-svc": {"supported-classification": {"port-classification": 
false, "vlan-classification": {"vlan-tag-classification": true, "outer-tag": { + "supported-tag-types": ["ietf-eth-tran-types:classify-c-vlan"], "vlan-bundling": false, "vlan-range": "21" + }}}} + }, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "10.1.1.1"}, + {"tp-id": "201", "ietf-te-topology:te": {"name": "201"}, "ietf-te-topology:te-tp-id": "10.1.2.1"} ] } ], diff --git a/src/tests/tools/mock_nce_t_ctrl/startup.json b/src/tests/tools/mock_nce_t_ctrl/startup.json index 4ae24e51b..8b8b3cbc5 100644 --- a/src/tests/tools/mock_nce_t_ctrl/startup.json +++ b/src/tests/tools/mock_nce_t_ctrl/startup.json @@ -8,7 +8,8 @@ }, "network-types": { "ietf-te-topology:te-topology": { - "ietf-otn-topology:otn-topology": {} + "ietf-otn-topology:otn-topology": {}, + "ietf-eth-te-topology:eth-tran-topology": {} } }, "node": [ @@ -16,34 +17,42 @@ "node-id": "O-PE1", "ietf-te-topology:te-node-id": "172.16.182.25", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-PE1", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "10.33.1.1"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "10.33.2.1"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254", + "ietf-eth-te-topology:eth-svc": {"supported-classification": {"port-classification": false, "vlan-classification": {"vlan-tag-classification": true, "outer-tag": { + "supported-tag-types": ["ietf-eth-tran-types:classify-c-vlan"], "vlan-bundling": false, "vlan-range": "31" + }}}} + } ] }, { "node-id": "O-P1", "ietf-te-topology:te-node-id": "172.16.185.31", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-P1", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "10.33.4.2"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "10.33.2.2"} ] }, { "node-id": "O-P2", "ietf-te-topology:te-node-id": "172.16.185.33", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-P2", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ - {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "10.33.1.2"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "10.33.3.2"} ] }, { "node-id": "O-PE2", "ietf-te-topology:te-node-id": "172.16.185.32", "ietf-te-topology:te": {"te-node-attributes": {"otn-node": {}, "name": "O-PE2", "admin-status": "up"}, "oper-status": "up"}, "ietf-network-topology:termination-point": [ 
- {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "0.0.0.0"}, - {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254"} + {"tp-id": "500", "ietf-te-topology:te": {"name": "500"}, "ietf-te-topology:te-tp-id": "10.33.4.1"}, + {"tp-id": "501", "ietf-te-topology:te": {"name": "501"}, "ietf-te-topology:te-tp-id": "10.33.3.1"}, + {"tp-id": "200", "ietf-te-topology:te": {"name": "200"}, "ietf-te-topology:te-tp-id": "128.32.33.254", + "ietf-eth-te-topology:eth-svc": {"supported-classification": {"port-classification": false, "vlan-classification": {"vlan-tag-classification": true, "outer-tag": { + "supported-tag-types": ["ietf-eth-tran-types:classify-c-vlan"], "vlan-bundling": false, "vlan-range": "101" + }}}} + } ] } ], -- GitLab From f99720fe3aa62da3a4404126b228e053ff3239d0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:34:20 +0000 Subject: [PATCH 263/367] SIMAP Connector: - Fixed exception logging in TelemetryWorker --- src/simap_connector/service/telemetry/TelemetryWorker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/telemetry/TelemetryWorker.py b/src/simap_connector/service/telemetry/TelemetryWorker.py index 31df79c7b..530fb3039 100644 --- a/src/simap_connector/service/telemetry/TelemetryWorker.py +++ b/src/simap_connector/service/telemetry/TelemetryWorker.py @@ -62,7 +62,7 @@ class TelemetryWorker(threading.Thread): except Exception: MSG = '[run][{:s}] Unhandled Exception' - LOGGER.info(MSG.format(str(self._worker_name))) + LOGGER.exception(MSG.format(str(self._worker_name))) finally: MSG = '[run][{:s}] Terminated' LOGGER.info(MSG.format(str(self._worker_name))) -- GitLab From e3e23f093f00363c974adc658589249a9f8791d6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:43:00 +0000 Subject: [PATCH 264/367] Device component - IETF ACTN / NCE Drivers: - Added support to discover VLAN tags from IETF Network Topology --- .../ietf_actn/handlers/NetworkTopologyHandler.py | 16 +++++++++++++++- .../nce/handlers/NetworkTopologyHandler.py | 16 +++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py index 691d4b01e..ef0246c06 100644 --- a/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/ietf_actn/handlers/NetworkTopologyHandler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging +import logging, re from typing import Dict, List, Optional from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum @@ -145,6 +145,20 @@ class NetworkTopologyHandler: 'mtu' : '1500', 'site_location' : site_location, } + + outer_tag_vlan_range : Optional[str] = ( + tp + .get('ietf-eth-te-topology:eth-svc', dict()) + .get('supported-classification', dict()) + .get('vlan-classification', dict()) + .get('outer-tag', dict()) + .get('vlan-range') + ) + if outer_tag_vlan_range is not None: + RE_NUMBER = re.compile(r'[0-9]+') + if RE_NUMBER.match(outer_tag_vlan_range) is not None: + endpoint_settings['vlan_tag'] = int(outer_tag_vlan_range) + endpoint_data = { 'device_uuid': node_id, 'uuid': tp_id, diff --git a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py index 6e8d2e555..7ca1b73f6 100644 --- a/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py +++ b/src/device/service/drivers/nce/handlers/NetworkTopologyHandler.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import logging, re from typing import Dict, List, Optional from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum @@ -154,6 +154,20 @@ class NetworkTopologyHandler: 'mtu' : '1500', 'site_location' : site_location, } + + outer_tag_vlan_range : Optional[str] = ( + tp + .get('ietf-eth-te-topology:eth-svc', dict()) + .get('supported-classification', dict()) + .get('vlan-classification', dict()) + .get('outer-tag', dict()) + .get('vlan-range') + ) + if outer_tag_vlan_range is not None: + RE_NUMBER = re.compile(r'[0-9]+') + if RE_NUMBER.match(outer_tag_vlan_range) is not None: + endpoint_settings['vlan_tag'] = int(outer_tag_vlan_range) + endpoint_data = { 'device_uuid': node_id, 'uuid': tp_id, -- GitLab From 20281a38e4ab313abefbe210d0e3000a5fb03da1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:55:39 +0000 Subject: [PATCH 265/367] SIMAP Connector: - Fixed bandwidth utilization range for Link telemetry --- src/simap_connector/service/simap_updater/SimapUpdater.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 07ddd4319..6c2c9ea8d 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -341,10 +341,10 @@ class EventDispatcher(BaseEventDispatcher): resources.links.append(ResourceLink( domain_name=topology_name, link_name=link_name, bandwidth_utilization_sampler=SyntheticSampler.create_random( - amplitude_scale = 2.0, + amplitude_scale = 45.0, phase_scale = 1e-7, period_scale = 86_400, - offset_scale = 10_000_000, + offset_scale = 50, noise_ratio = 0.05, ), latency_sampler=SyntheticSampler.create_random( -- GitLab From ac87d8badfee6c31f8497fabad655ceff4ecaae1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 14:59:03 +0000 Subject: [PATCH 266/367] SIMAP Connector / NCE-T / NCE-FAN / SIMAP: - Fixed formatting of bandwidth utilization and latency --- src/simap_connector/service/simap_updater/SimapClient.py | 4 ++-- src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py | 4 ++-- src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py | 4 ++-- 
src/tests/tools/simap_server/simap_client/SimapClient.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapClient.py b/src/simap_connector/service/simap_updater/SimapClient.py index a300aca74..725b08bd4 100644 --- a/src/simap_connector/service/simap_updater/SimapClient.py +++ b/src/simap_connector/service/simap_updater/SimapClient.py @@ -186,8 +186,8 @@ class LinkTelemetry: ) -> None: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) telemetry = { - 'bandwidth-utilization': bandwidth_utilization, - 'latency' : latency, + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), } if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py index a300aca74..725b08bd4 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapClient.py @@ -186,8 +186,8 @@ class LinkTelemetry: ) -> None: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) telemetry = { - 'bandwidth-utilization': bandwidth_utilization, - 'latency' : latency, + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), } if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py index a300aca74..725b08bd4 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapClient.py @@ -186,8 +186,8 @@ class LinkTelemetry: ) -> None: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) telemetry = { - 'bandwidth-utilization': bandwidth_utilization, - 'latency' : latency, + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), } if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} diff --git a/src/tests/tools/simap_server/simap_client/SimapClient.py b/src/tests/tools/simap_server/simap_client/SimapClient.py index a300aca74..725b08bd4 100644 --- a/src/tests/tools/simap_server/simap_client/SimapClient.py +++ b/src/tests/tools/simap_server/simap_client/SimapClient.py @@ -186,8 +186,8 @@ class LinkTelemetry: ) -> None: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) telemetry = { - 'bandwidth-utilization': bandwidth_utilization, - 'latency' : latency, + 'bandwidth-utilization': '{:.2f}'.format(bandwidth_utilization), + 'latency' : '{:.3f}'.format(latency), } if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} -- GitLab From 55053adf274e9392fd3dd4e13ec2c81f0c6baa8e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 15:34:26 +0000 Subject: [PATCH 267/367] Service component - IETF ACTN Service Handler: - Fixed OSU tunnel / Etht Service settings --- 
.../service_handlers/l3nm_ietfactn/Constants.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfactn/Constants.py b/src/service/service/service_handlers/l3nm_ietfactn/Constants.py index d09790bdf..5f72bfbbc 100644 --- a/src/service/service/service_handlers/l3nm_ietfactn/Constants.py +++ b/src/service/service/service_handlers/l3nm_ietfactn/Constants.py @@ -15,8 +15,8 @@ # These hardcoded values will be updated with proper logic in second phase of the PoC VPN_VLAN_TAGS_TO_SERVICE_NAME = { - (21, 101): ('osu_tunnel_1', 'etht_service_1'), - (31, 201): ('osu_tunnel_2', 'etht_service_2'), + (21, 201): ('osu_tunnel_1', 'etht_service_1'), + (31, 101): ('osu_tunnel_2', 'etht_service_2'), } OSU_TUNNEL_SETTINGS = { @@ -26,8 +26,8 @@ OSU_TUNNEL_SETTINGS = { 'bidirectional': True, 'delay': 20, 'ttp_channel_names': { - ('10.0.10.1', '200'): 'och:1-odu2:1-oduflex:1-osuflex:2', - ('10.0.30.1', '200'): 'och:1-odu2:1-oduflex:3-osuflex:1', + ('O-PE1', '200'): 'och:1-odu2:1-oduflex:1-osuflex:2', + ('O-PE2', '200'): 'och:1-odu2:1-oduflex:3-osuflex:1', } }, 'osu_tunnel_2': { @@ -36,8 +36,8 @@ OSU_TUNNEL_SETTINGS = { 'bidirectional': True, 'delay': 20, 'ttp_channel_names': { - ('10.0.10.1', '200'): 'och:1-odu2:1-oduflex:1-osuflex:2', - ('10.0.30.1', '200'): 'och:1-odu2:1-oduflex:3-osuflex:1', + ('O-PE1', '200'): 'och:1-odu2:1-oduflex:1-osuflex:2', + ('O-PE2', '200'): 'och:1-odu2:1-oduflex:3-osuflex:1', } }, } -- GitLab From d5dab8a03eea32e0d5226ff70ded663ad8c901c4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 15:45:05 +0000 Subject: [PATCH 268/367] SIMAP Connector: - Fixed TelemetryWorker wait loop granularity --- src/simap_connector/service/telemetry/TelemetryWorker.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/telemetry/TelemetryWorker.py b/src/simap_connector/service/telemetry/TelemetryWorker.py index 530fb3039..cd590ad19 100644 --- a/src/simap_connector/service/telemetry/TelemetryWorker.py +++ b/src/simap_connector/service/telemetry/TelemetryWorker.py @@ -13,7 +13,7 @@ # limitations under the License. 
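The hunk below makes the worker's wait-loop granularity explicit (WAIT_LOOP_GRANULARITY = 0.5 s) so the thread keeps reacting to its stop/terminate events while sleeping between samples. An alternative pattern, sketched here purely as an illustration and not what the patch does, is to wait on the stop event itself with a timeout:

    import threading

    def interruptible_sleep(
        stop_event : threading.Event, terminate : threading.Event, interval : float
    ) -> None:
        # Return as soon as either event is set, or after `interval` seconds at the latest.
        granularity = 0.5
        remaining = interval
        while remaining > 0.0:
            if terminate.is_set(): return
            step = min(granularity, remaining)
            if stop_event.wait(timeout=step): return
            remaining -= step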
-import logging, threading, time +import logging, math, threading, time from typing import Optional from simap_connector.service.simap_updater.SimapClient import SimapClient from .Resources import Resources @@ -22,6 +22,9 @@ from .Resources import Resources LOGGER = logging.getLogger(__name__) +WAIT_LOOP_GRANULARITY = 0.5 + + class TelemetryWorker(threading.Thread): def __init__( self, worker_name : str, simap_client : SimapClient, resources : Resources, @@ -54,11 +57,11 @@ class TelemetryWorker(threading.Thread): self._resources.generate_samples(self._simap_client) # Make wait responsible to terminations - iterations = self._sampling_interval / 0.1 + iterations = int(math.ceil(self._sampling_interval / WAIT_LOOP_GRANULARITY)) for _ in range(iterations): if self._stop_event.is_set(): break if self._terminate.is_set() : break - time.sleep(0.1) + time.sleep(WAIT_LOOP_GRANULARITY) except Exception: MSG = '[run][{:s}] Unhandled Exception' -- GitLab From c0a496d3014b67ec5a48773bf0ebea3ff76dfa10 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 16:10:16 +0000 Subject: [PATCH 269/367] NBI component - IETF Slice: - Added static routing table placeholder --- .../ietf_network_slice/ietf_slice_handler.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py index 56061615b..e3e8ea6f8 100644 --- a/src/nbi/service/ietf_network_slice/ietf_slice_handler.py +++ b/src/nbi/service/ietf_network_slice/ietf_slice_handler.py @@ -370,6 +370,31 @@ class IETFSliceHandler: vlan_tag = int(match_type_item['value'][0]) break + update_config_rule_custom( + slice_request.slice_config.config_rules, + '/settings', + { + 'address_families': (['IPV4'], True), + 'mtu' : (1500, True), + } + ) + + static_routing_table = { + #'{:d}-{:s}/{:d}'.format(lan_tag, ip_range, ip_prefix_len): ( + # { + # 'vlan-id': lan_tag, + # 'ip-network': '{:s}/{:d}'.format(ip_range, ip_prefix_len), + # 'next-hop': next_hop + # }, + # True + #) + #for (ip_range, ip_prefix_len, lan_tag), next_hop in static_routing.items() + } + update_config_rule_custom( + slice_request.slice_config.config_rules, + '/static_routing', static_routing_table + ) + # Endpoint-specific config rule fields endpoint_config_rule_fields = { 'address_ip': (sdp_ip_address, RAISE_IF_DIFFERS), -- GitLab From 47d85d5fd983da528c0ee93f6cb9cafa144a954d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 16:12:07 +0000 Subject: [PATCH 270/367] Service component - IETF ACTN Service Handler: - Fixed Routing table discovery --- .../l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py index acaf31b87..4b75e7ec2 100644 --- a/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py +++ b/src/service/service/service_handlers/l3nm_ietfactn/L3NM_IETFACTN_ServiceHandler.py @@ -119,8 +119,7 @@ class L3NM_IETFACTN_ServiceHandler(_ServiceHandler): self, src_vlan_tag : int, dst_vlan_tag : int ) -> Tuple[List[Dict], List[Dict]]: static_routing = self.__settings_handler.get('/static_routing') - if static_routing is None: raise Exception('static_routing not found') - static_routing_dict : Dict = static_routing.value + static_routing_dict : Dict = dict() if static_routing is None else 
static_routing.value src_static_routes = list() dst_static_routes = list() for _, static_route in static_routing_dict.items(): -- GitLab From be8e7bb66d78eee3250537004bcefd7f060f7077 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 16:46:30 +0000 Subject: [PATCH 271/367] Device component - IETF ACTN Drivers: - Fixed handlers for OSU Tunnel and EthT Services to use alternative (v2) RestConf endpoints as they are not standardized --- .../drivers/ietf_actn/IetfActnDriver.py | 28 +++++++++++++------ 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/src/device/service/drivers/ietf_actn/IetfActnDriver.py b/src/device/service/drivers/ietf_actn/IetfActnDriver.py index 11f2a69bf..12387e101 100644 --- a/src/device/service/drivers/ietf_actn/IetfActnDriver.py +++ b/src/device/service/drivers/ietf_actn/IetfActnDriver.py @@ -44,20 +44,30 @@ class IetfActnDriver(_Driver): self.__started = threading.Event() self.__terminate = threading.Event() - restconf_settings = copy.deepcopy(settings) - restconf_settings.pop('base_url', None) - restconf_settings.pop('import_topology', None) - restconf_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient') - self._rest_conf_client = RestConfClient(address, port=port, **restconf_settings) - self._handler_etht_service = EthtServiceHandler(self._rest_conf_client) - self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_client, **settings) - self._handler_osu_tunnel = OsuTunnelHandler(self._rest_conf_client) + restconf_v1_settings = copy.deepcopy(settings) + restconf_v1_settings.pop('base_url', None) + restconf_v1_settings.pop('import_topology', None) + restconf_v1_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient_v1') + + self._rest_conf_v1_client = RestConfClient(address, port=port, **restconf_v1_settings) + self._handler_net_topology = NetworkTopologyHandler(self._rest_conf_v1_client, **settings) + + restconf_v2_settings = copy.deepcopy(settings) + restconf_v2_settings.pop('base_url', None) + restconf_v2_settings.pop('import_topology', None) + restconf_v2_settings['restconf_version'] = 'v2' + restconf_v2_settings['logger'] = logging.getLogger(__name__ + '.RestConfClient_v2') + + self._rest_conf_v2_client = RestConfClient(address, port=port, **restconf_v2_settings) + self._handler_etht_service = EthtServiceHandler(self._rest_conf_v2_client) + self._handler_osu_tunnel = OsuTunnelHandler(self._rest_conf_v2_client) def Connect(self) -> bool: with self.__lock: if self.__started.is_set(): return True try: - self._rest_conf_client._discover_base_url() + self._rest_conf_v1_client._discover_base_url() + self._rest_conf_v2_client._discover_base_url() except requests.exceptions.Timeout: LOGGER.exception('Timeout exception checking connectivity') return False -- GitLab From 7f77c32087b5509f0f69d8d21142da2d1da94298 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 16:58:18 +0000 Subject: [PATCH 272/367] ECOC F5GA Telemetry Demo: - Updated subscribe telemetry script to stream telemetry data --- .../subscribe-telemetry-slice1.sh | 51 +++++++++++++++++-- 1 file changed, 48 insertions(+), 3 deletions(-) mode change 100755 => 100644 src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh old mode 100755 new mode 100644 index 34de3b3bf..3fa27ef28 --- a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh +++ 
b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh @@ -19,10 +19,55 @@ cd $(dirname $0) echo "[E2E] Subscribe Telemetry slice1..." -curl --request POST --location --header 'Content-Type: application/json' \ +# POST to create subscription and capture response +resp=$(curl -sS --request POST --location --header 'Content-Type: application/json' \ --data @data/telemetry/subscription-slice1.json \ - http://0.0.0.0:80/restconf/operations/subscriptions:establish-subscription -echo + http://0.0.0.0:80/restconf/operations/subscriptions:establish-subscription) +echo "$resp" + +# Ensure `jq` is available for JSON parsing +if ! command -v jq >/dev/null 2>&1; then + echo "Error: jq is required but not installed. Install jq and retry." >&2 + exit 1 +fi + +# Extract the subscription URI from the JSON response +# Example response: {"identifier":"4086","uri":"/restconf/data/subscriptions/4086"} +uri=$(echo "$resp" | jq -r '.uri // empty') + +if [ -z "$uri" ]; then + echo "Failed to extract subscription URI from response" >&2 + exit 1 +fi + +# Build full URL (use http for RESTCONF chunked/SSE-style streaming) +full_url="http://0.0.0.0:80${uri}" +echo "Streaming telemetry from '$full_url' (press Ctrl+C to stop)..." + +# Attempt a long-lived HTTP GET that will dump data as it arrives. +# Many RESTCONF subscription implementations use chunked responses / SSE +# and this curl invocation will keep printing incoming data. +curl -N -sS -H 'Accept: application/yang-data+json' "$full_url" + +# If your server exposes a WebSocket endpoint instead, use a websocket client +# such as `websocat` or `wscat`. Example (requires websocat): +# websocat "ws://0.0.0.0:80${uri}" +# Or using node's wscat: +# npx wscat -c "ws://0.0.0.0:80${uri}" + +# If you need a Python websocket client (requires `websocket-client`): +# python3 - <<'PY' +#from websocket import create_connection +#ws = create_connection('ws://0.0.0.0:80' + '${uri}') +#try: +# while True: +# msg = ws.recv() +# print(msg) +#finally: +# ws.close() +#PY + +echo echo "Done!" 
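For reference, the shell flow above (POST establish-subscription, extract the returned uri with jq, then hold a long-lived GET open) maps to a few lines of Python. This is only an illustrative equivalent using the requests library, not part of the patch; it assumes the same endpoints and payload file as the script:

    import json, requests

    BASE_URL = 'http://0.0.0.0:80'

    # Establish the subscription with the same payload file used by the script.
    with open('data/telemetry/subscription-slice1.json', 'r', encoding='utf-8') as f:
        payload = json.load(f)
    resp = requests.post(
        BASE_URL + '/restconf/operations/subscriptions:establish-subscription',
        json=payload, timeout=10,
    )
    resp.raise_for_status()
    uri = resp.json()['uri']  # e.g. '/restconf/data/subscriptions/4086'

    # Keep a long-lived GET open; chunked/SSE-style telemetry is printed as it arrives.
    with requests.get(
        BASE_URL + uri, stream=True, headers={'Accept': 'application/yang-data+json'},
    ) as stream:
        for line in stream.iter_lines(decode_unicode=True):
            if line: print(line)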
-- GitLab From 847e003f35fb9eae74c3af87379eccbd5d7cb3be Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 18:08:48 +0000 Subject: [PATCH 273/367] NBI component: - Added Redis to Manifest --- manifests/nbiservice.yaml | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index ec6db58b7..0cf45401f 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -56,6 +56,11 @@ spec: value: "tfs123" - name: CRDB_SSLMODE value: "require" + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD envFrom: - secretRef: name: kfk-kpi-data @@ -80,6 +85,28 @@ spec: limits: cpu: 1000m memory: 2048Mi + - name: redis + image: redis:7.0-alpine + env: + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: redis-secrets + key: REDIS_PASSWORD + ports: + - containerPort: 6379 + name: client + command: ["redis-server"] + args: + - --requirepass + - $(REDIS_PASSWORD) + resources: + requests: + cpu: 50m + memory: 64Mi + limits: + cpu: 500m + memory: 512Mi --- apiVersion: v1 kind: Service @@ -103,3 +130,7 @@ spec: # protocol: TCP # port: 9192 # targetPort: 9192 + - name: redis + protocol: TCP + port: 6379 + targetPort: 6379 -- GitLab From 9406c370d83aaf7487d22d402e155d41004c2904 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 18:11:04 +0000 Subject: [PATCH 274/367] NBI component: - Added dependencies to requirements - Added Redis config settings - Adde configuration of flask-sse --- src/nbi/Config.py | 7 +++++++ src/nbi/requirements.in | 6 ++++-- src/nbi/service/NbiApplication.py | 5 ++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/nbi/Config.py b/src/nbi/Config.py index 14f12af68..8df9a7a3d 100644 --- a/src/nbi/Config.py +++ b/src/nbi/Config.py @@ -13,6 +13,8 @@ # limitations under the License. from werkzeug.security import generate_password_hash +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_setting # REST-API users RESTAPI_USERS = { # TODO: implement a database of credentials and permissions @@ -21,3 +23,8 @@ RESTAPI_USERS = { # TODO: implement a database of credentials and permissions # Rebuild using: "python -c 'import secrets; print(secrets.token_hex())'" SECRET_KEY = '2b8ab76763d81f7bced786de8ba40bd67eea6ff79217a711eb5f8d1f19c145c1' + +redis_host = get_service_host(ServiceNameEnum.NBI) +redis_port = 6379 +redis_pass = get_setting('REDIS_PASSWORD') +REDIS_URL = 'redis://:{:s}@{:s}:{:d}'.format(redis_pass, redis_host, redis_port) diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in index 72ca62b1e..73c9df815 100644 --- a/src/nbi/requirements.in +++ b/src/nbi/requirements.in @@ -12,16 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. 
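Context for the Redis and flask-sse additions in this patch and the manifest change above: flask-sse backs Server-Sent Events with a Redis pub/sub channel, so the NBI pod gains a Redis sidecar and REDIS_URL is assembled in nbi/Config.py from the REDIS_PASSWORD secret. Once the blueprint is registered under /restconf/stream (see NbiApplication.py below), any request handler can push events to clients holding a GET open on that prefix. A minimal usage sketch, with a placeholder Redis URL and made-up channel/payload names:

    from flask import Flask
    from flask_sse import sse

    app = Flask(__name__)
    # Placeholder URL; the real one is built in nbi/Config.py from the REDIS_PASSWORD secret.
    app.config['REDIS_URL'] = 'redis://:changeme@localhost:6379'
    app.register_blueprint(sse, url_prefix='/restconf/stream')

    @app.route('/publish-demo')
    def publish_demo():
        # Delivered to every client holding GET /restconf/stream?channel=telemetry open.
        sse.publish({'bandwidth-utilization': '42.00'}, type='telemetry-update', channel='telemetry')
        return 'queued', 200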
+asgiref==3.9.* deepdiff==6.7.* deepmerge==1.1.* eventlet==0.39.0 -Flask==2.1.3 +Flask[async]==2.1.3 Flask-HTTPAuth==4.5.0 Flask-RESTful==0.3.9 flask-socketio==5.5.1 +flask-sse==1.0.0 #gevent==24.11.1 #gevent-websocket==0.10.1 -#greenlet==3.1.1 +greenlet==3.1.* gunicorn==23.0.0 jsonschema==4.4.0 kafka-python==2.0.6 diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index 8d9e7a879..caed3ce1a 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -18,8 +18,9 @@ from typing import Any, List, Optional, Tuple from flask import Flask, request from flask_restful import Api, Resource from flask_socketio import Namespace, SocketIO +from flask_sse import sse from common.tools.kafka.Variables import KafkaConfig, KafkaTopic -from nbi.Config import SECRET_KEY +from nbi.Config import REDIS_URL, SECRET_KEY from nbi.service.database.base import rebuild_database from .database.Engine import Engine @@ -41,6 +42,8 @@ class NbiApplication: self._app = Flask(__name__) self._app.config['SECRET_KEY'] = SECRET_KEY + self._app.config['REDIS_URL'] = REDIS_URL + self._app.register_blueprint(sse, url_prefix='/restconf/stream') self._app.after_request(log_request) self._api = Api(self._app, prefix=base_url) -- GitLab From ae880199d52cbc9535e152e485f9bb4e13b54ddd Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 19 Sep 2025 18:12:26 +0000 Subject: [PATCH 275/367] NBI component - SSE Telemetry: - Renamed old classes - Added sampling interval to parameters of subscriptions --- ...create_subscription.py => CreateSubscription.py} | 2 ++ ...delete_subscription.py => DeleteSubscription.py} | 0 src/nbi/service/sse_telemetry/__init__.py | 4 ++-- .../service/sse_telemetry/database/Subscription.py | 13 +++++++++---- .../sse_telemetry/database/models/Subscription.py | 3 ++- 5 files changed, 15 insertions(+), 7 deletions(-) rename src/nbi/service/sse_telemetry/{create_subscription.py => CreateSubscription.py} (98%) rename src/nbi/service/sse_telemetry/{delete_subscription.py => DeleteSubscription.py} (100%) diff --git a/src/nbi/service/sse_telemetry/create_subscription.py b/src/nbi/service/sse_telemetry/CreateSubscription.py similarity index 98% rename from src/nbi/service/sse_telemetry/create_subscription.py rename to src/nbi/service/sse_telemetry/CreateSubscription.py index 997e7b455..dcabe93e0 100644 --- a/src/nbi/service/sse_telemetry/create_subscription.py +++ b/src/nbi/service/sse_telemetry/CreateSubscription.py @@ -143,6 +143,7 @@ class CreateSubscription(Resource): identifier=r.identifier, uri=r.uri, xpath=xpath_filter, + sampling_interval=sampling_interval, main_subscription=False, main_subscription_id=request_identifier, ) @@ -157,6 +158,7 @@ class CreateSubscription(Resource): xpath=request_data['ietf-subscribed-notifications:input'][ 'ietf-yang-push:datastore-xpath-filter' ], + sampling_interval=sampling_interval, main_subscription=True, main_subscription_id=None, ) diff --git a/src/nbi/service/sse_telemetry/delete_subscription.py b/src/nbi/service/sse_telemetry/DeleteSubscription.py similarity index 100% rename from src/nbi/service/sse_telemetry/delete_subscription.py rename to src/nbi/service/sse_telemetry/DeleteSubscription.py diff --git a/src/nbi/service/sse_telemetry/__init__.py b/src/nbi/service/sse_telemetry/__init__.py index a27686b95..72b6a4fb5 100644 --- a/src/nbi/service/sse_telemetry/__init__.py +++ b/src/nbi/service/sse_telemetry/__init__.py @@ -17,8 +17,8 @@ from nbi.service.NbiApplication import NbiApplication -from 
.create_subscription import CreateSubscription -from .delete_subscription import DeleteSubscription +from .CreateSubscription import CreateSubscription +from .DeleteSubscription import DeleteSubscription def register_telemetry_subscription(nbi_app: NbiApplication): diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py index c42c0aa78..f417f8598 100644 --- a/src/nbi/service/sse_telemetry/database/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/Subscription.py @@ -30,6 +30,7 @@ class SSESubsciprionDict(TypedDict): identifier: str uri: str xpath: str + sampling_interval : float main_subscription: bool main_subscription_id: Optional[str] @@ -44,6 +45,7 @@ def set_subscription(db_engine: Engine, request: SSESubsciprionDict) -> None: identifier=stmt.excluded.identifier, uri=stmt.excluded.uri, xpath=stmt.excluded.xpath, + sampling_interval=stmt.excluded.sampling_interval, main_subscription=stmt.excluded.main_subscription, main_subscription_id=stmt.excluded.main_subscription_id, ), @@ -67,11 +69,11 @@ def delete_subscription(db_engine: Engine, request: str, main_subscription: bool _ = run_transaction(sessionmaker(bind=db_engine), callback) -def get_main_subscription(db_engine: Engine, request: str) -> Optional[SSESubsciprionDict]: +def get_main_subscription(db_engine: Engine, subscription_id: str) -> Optional[SSESubsciprionDict]: def callback(session: Session) -> Optional[SSESubsciprionDict]: obj: Optional[SSESubscriptionModel] = ( session.query(SSESubscriptionModel) - .filter_by(identifier=request, main_subscription=True) + .filter_by(identifier=subscription_id, main_subscription=True) .one_or_none() ) return ( @@ -82,6 +84,7 @@ def get_main_subscription(db_engine: Engine, request: str) -> Optional[SSESubsci identifier=obj.identifier, uri=obj.uri, xpath=obj.xpath, + sampling_interval=obj.sampling_interval, main_subscription=obj.main_subscription, main_subscription_id=obj.main_subscription_id, ) @@ -90,11 +93,11 @@ def get_main_subscription(db_engine: Engine, request: str) -> Optional[SSESubsci return run_transaction(sessionmaker(bind=db_engine), callback) -def get_sub_subscription(db_engine: Engine, request: str) -> List[SSESubsciprionDict]: +def get_sub_subscription(db_engine: Engine, subscription_id: str) -> List[SSESubsciprionDict]: def callback(session: Session) -> List[SSESubsciprionDict]: obj: List[SSESubscriptionModel] = ( session.query(SSESubscriptionModel) - .filter_by(main_subscription_id=request, main_subscription=False) + .filter_by(main_subscription_id=subscription_id, main_subscription=False) .all() ) return [ @@ -103,6 +106,7 @@ def get_sub_subscription(db_engine: Engine, request: str) -> List[SSESubsciprion identifier=o.identifier, uri=o.uri, xpath=o.xpath, + sampling_interval=obj.sampling_interval, main_subscription=o.main_subscription, main_subscription_id=o.main_subscription_id, ) @@ -121,6 +125,7 @@ def get_subscriptions(db_engine: Engine) -> List[SSESubsciprionDict]: identifier=obj.identifier, uri=obj.uri, xpath=obj.xpath, + sampling_interval=obj.sampling_interval, main_subscription=obj.main_subscription, main_subscription_id=obj.main_subscription_id, ) diff --git a/src/nbi/service/sse_telemetry/database/models/Subscription.py b/src/nbi/service/sse_telemetry/database/models/Subscription.py index 8a246e7a2..f8f6dcd17 100644 --- a/src/nbi/service/sse_telemetry/database/models/Subscription.py +++ b/src/nbi/service/sse_telemetry/database/models/Subscription.py @@ -14,7 +14,7 @@ import sqlalchemy 
-from sqlalchemy import Column, Integer, String, JSON, Boolean +from sqlalchemy import Column, Float, Integer, String, JSON, Boolean from sqlalchemy.dialects.postgresql import UUID @@ -32,6 +32,7 @@ class SSESubscriptionModel(_Base): identifier = Column(String, nullable=False, unique=False) uri = Column(String, nullable=False, unique=False) xpath = Column(String, nullable=False, unique=False) + sampling_interval = Column(Float, nullable=True) main_subscription = Column(Boolean, default=False) main_subscription_id = Column(String, nullable=True) -- GitLab From 4490a45b2c022d8bcac59a8cddd2d353d2c8149a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 15:56:13 +0000 Subject: [PATCH 276/367] Proto: - Added SIMAP Connector proto file --- proto/simap_connector.proto | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 proto/simap_connector.proto diff --git a/proto/simap_connector.proto b/proto/simap_connector.proto new file mode 100644 index 000000000..19ed89751 --- /dev/null +++ b/proto/simap_connector.proto @@ -0,0 +1,35 @@ +// Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package simap_connector; + +import "context.proto"; + +// Subscription handling according to https://datatracker.ietf.org/doc/html/rfc8641 + +service SimapConnectorService { + rpc EstablishSubscription(Subscription ) returns (SubscriptionId) {} + rpc DeleteSubscription (SubscriptionId) returns (context.Empty ) {} +} + +message SubscriptionId { + uint32 subscription_id = 1; +} + +message Subscription { + string datastore = 1; + string xpath_filter = 2; + float period = 3; +} -- GitLab From 7757182ecda67811eed1f7e34b73821014b10178 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 15:58:44 +0000 Subject: [PATCH 277/367] Manifests: - Updated SIMAP Connectr manifest with Kafka/CockroachDB data --- manifests/simap_connectorservice.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index a1cfe5324..a061e1f7f 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -54,6 +54,13 @@ spec: value: "admin" - name: SIMAP_SERVER_PASSWORD value: "admin" + - name: CRDB_DATABASE + value: "tfs_simap_connector" + envFrom: + - secretRef: + name: crdb-data + - secretRef: + name: kfk-kpi-data startupProbe: grpc: port: 9090 -- GitLab From 1fab7278cc54df2a0de20f70adbdf41b481d23f7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 16:14:23 +0000 Subject: [PATCH 278/367] Simap Connector: - Store subscriptions in persistent CRDB database - gRPC Server methods to establish/delete subscriptions --- .../client/SimapConnectorClient.py | 24 +++-- .../service/SimapConnectorService.py | 17 +++- .../SimapConnectorServiceServicerImpl.py | 43 +++++++++ src/simap_connector/service/__main__.py | 18 +++- .../service/database/Engine.py | 55 ++++++++++++ 
.../service/database/Subscription.py | 90 +++++++++++++++++++ .../service/database/__init__.py | 13 +++ .../database/models/SubscriptionModel.py | 41 +++++++++ .../service/database/models/_Base.py | 42 +++++++++ .../service/database/models/__init__.py | 13 +++ 10 files changed, 348 insertions(+), 8 deletions(-) create mode 100644 src/simap_connector/service/SimapConnectorServiceServicerImpl.py create mode 100644 src/simap_connector/service/database/Engine.py create mode 100644 src/simap_connector/service/database/Subscription.py create mode 100644 src/simap_connector/service/database/__init__.py create mode 100644 src/simap_connector/service/database/models/SubscriptionModel.py create mode 100644 src/simap_connector/service/database/models/_Base.py create mode 100644 src/simap_connector/service/database/models/__init__.py diff --git a/src/simap_connector/client/SimapConnectorClient.py b/src/simap_connector/client/SimapConnectorClient.py index 299b3bada..3b9f941cc 100644 --- a/src/simap_connector/client/SimapConnectorClient.py +++ b/src/simap_connector/client/SimapConnectorClient.py @@ -14,12 +14,12 @@ import grpc, logging from common.Constants import ServiceNameEnum -from common.proto.context_pb2 import Empty -#from common.proto.e2eorchestrator_pb2_grpc import E2EOrchestratorServiceStub from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import Empty +from common.proto.simap_connector_pb2 import Subscription, SubscriptionId +from common.proto.simap_connector_pb2_grpc import SimapConnectorServiceStub from common.tools.client.RetryDecorator import delay_exponential, retry -#from common.tools.grpc.Tools import grpc_message_to_json -#from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest, E2EOrchestratorReply +from common.tools.grpc.Tools import grpc_message_to_json_string LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 @@ -42,9 +42,23 @@ class SimapConnectorClient: def connect(self): self.channel = grpc.insecure_channel(self.endpoint) - #self.stub = E2EOrchestratorServiceStub(self.channel) + self.stub = SimapConnectorServiceStub(self.channel) def close(self): if self.channel is not None: self.channel.close() self.channel = None self.stub = None + + @RETRY_DECORATOR + def EstablishSubscription(self, request : Subscription) -> SubscriptionId: + LOGGER.debug('EstablishSubscription request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.EstablishSubscription(request) + LOGGER.debug('EstablishSubscription result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def DeleteSubscription(self, request : SubscriptionId) -> Empty: + LOGGER.debug('DeleteSubscription request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.DeleteSubscription(request) + LOGGER.debug('DeleteSubscription result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/simap_connector/service/SimapConnectorService.py b/src/simap_connector/service/SimapConnectorService.py index 8555f3f9f..94457397e 100644 --- a/src/simap_connector/service/SimapConnectorService.py +++ b/src/simap_connector/service/SimapConnectorService.py @@ -12,14 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
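With the simap_connector.proto added in PATCH 276 and the client methods just above, establishing and removing a subscription from another component reduces to two RPCs. A usage sketch, assuming the client constructor resolves host/port from the service settings as other TFS clients do; the datastore, filter and period values are examples only:

    from common.proto.simap_connector_pb2 import Subscription, SubscriptionId
    from simap_connector.client.SimapConnectorClient import SimapConnectorClient

    client = SimapConnectorClient()

    # RFC 8641-style periodic subscription: datastore + XPath filter + period (illustrative values).
    request = Subscription(
        datastore    = 'ietf-datastores:operational',
        xpath_filter = '/ietf-network:networks/network/link',
        period       = 5.0,
    )
    subscription_id = client.EstablishSubscription(request)

    # ... later, tear it down again.
    client.DeleteSubscription(SubscriptionId(subscription_id=subscription_id.subscription_id))
    client.close()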
+ +import logging, sqlalchemy from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc +from common.proto.simap_connector_pb2 import DESCRIPTOR as SIMAP_CONNECTOR_DESCRIPTOR +from common.proto.simap_connector_pb2_grpc import add_SimapConnectorServiceServicer_to_server from common.tools.service.GenericGrpcService import GenericGrpcService +from .SimapConnectorServiceServicerImpl import SimapConnectorServiceServicerImpl + + +LOGGER = logging.getLogger(__name__) + class SimapConnectorService(GenericGrpcService): - def __init__(self, cls_name: str = __name__) -> None: + def __init__( + self, db_engine : sqlalchemy.engine.Engine, cls_name : str = __name__ + ) -> None: port = get_service_port_grpc(ServiceNameEnum.SIMAP_CONNECTOR) super().__init__(port, cls_name=cls_name) + self.simap_connector_servicer = SimapConnectorServiceServicerImpl(db_engine) def install_servicers(self): - pass + add_SimapConnectorServiceServicer_to_server(self.simap_connector_servicer, self.server) + self.add_reflection_service_name(SIMAP_CONNECTOR_DESCRIPTOR, 'SimapConnectorService') diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py new file mode 100644 index 000000000..49dcaf704 --- /dev/null +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -0,0 +1,43 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import grpc, logging, sqlalchemy +from common.proto.context_pb2 import Empty +from common.proto.simap_connector_pb2 import Subscription, SubscriptionId +from common.proto.simap_connector_pb2_grpc import SimapConnectorServiceServicer +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from .database.Subscription import subscription_set, subscription_delete + + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('SimapConnector', 'RPC') + + +class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): + def __init__(self, db_engine : sqlalchemy.engine.Engine) -> None: + LOGGER.debug('Creating Servicer...') + self.db_engine = db_engine + LOGGER.debug('Servicer Created') + + def _get_metrics(self) -> MetricsPool: return METRICS_POOL + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def EstablishSubscription(self, request : Subscription, context : grpc.ServicerContext) -> SubscriptionId: + return subscription_set(self.db_engine, request) + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def DeleteSubscription(self, request : SubscriptionId, context : grpc.ServicerContext) -> Empty: + return subscription_delete(self.db_engine, request) diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 1eb5fa819..6e1741255 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -24,6 +24,8 @@ from simap_connector.Config import ( SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) +from .database.Engine import Engine +from .database.models._Base import rebuild_database from .simap_updater.SimapClient import SimapClient from .simap_updater.SimapUpdater import SimapUpdater from .telemetry.TelemetryPool import TelemetryPool @@ -59,8 +61,22 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) + # Get Database Engine instance and initialize database, if needed + LOGGER.info('Getting SQLAlchemy DB Engine...') + db_engine = Engine.get_engine() + if db_engine is None: + LOGGER.error('Unable to get SQLAlchemy DB Engine...') + return -1 + + try: + Engine.create_database(db_engine) + except: # pylint: disable=bare-except # pragma: no cover + LOGGER.exception('Failed to check/create the database: {:s}'.format(str(db_engine.url))) + + rebuild_database(db_engine) + # Starting service - grpc_service = SimapConnectorService() + grpc_service = SimapConnectorService(db_engine) grpc_service.start() diff --git a/src/simap_connector/service/database/Engine.py b/src/simap_connector/service/database/Engine.py new file mode 100644 index 000000000..43690382e --- /dev/null +++ b/src/simap_connector/service/database/Engine.py @@ -0,0 +1,55 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, sqlalchemy, sqlalchemy_utils +from common.Settings import get_setting + +LOGGER = logging.getLogger(__name__) + +APP_NAME = 'tfs' +ECHO = False # true: dump SQL commands and transactions executed +CRDB_URI_TEMPLATE = 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' + +class Engine: + @staticmethod + def get_engine() -> sqlalchemy.engine.Engine: + crdb_uri = get_setting('CRDB_URI', default=None) + if crdb_uri is None: + CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') + CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') + CRDB_DATABASE = get_setting('CRDB_DATABASE') + CRDB_USERNAME = get_setting('CRDB_USERNAME') + CRDB_PASSWORD = get_setting('CRDB_PASSWORD') + CRDB_SSLMODE = get_setting('CRDB_SSLMODE') + crdb_uri = CRDB_URI_TEMPLATE.format( + CRDB_USERNAME, CRDB_PASSWORD, CRDB_NAMESPACE, CRDB_SQL_PORT, CRDB_DATABASE, CRDB_SSLMODE) + + try: + engine = sqlalchemy.create_engine( + crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True) + except: # pylint: disable=bare-except # pragma: no cover + LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) + return None + + return engine + + @staticmethod + def create_database(engine : sqlalchemy.engine.Engine) -> None: + if not sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.create_database(engine.url) + + @staticmethod + def drop_database(engine : sqlalchemy.engine.Engine) -> None: + if sqlalchemy_utils.database_exists(engine.url): + sqlalchemy_utils.drop_database(engine.url) diff --git a/src/simap_connector/service/database/Subscription.py b/src/simap_connector/service/database/Subscription.py new file mode 100644 index 000000000..2e6e83eb6 --- /dev/null +++ b/src/simap_connector/service/database/Subscription.py @@ -0,0 +1,90 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime, logging, uuid +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, Optional, Tuple +from common.proto.context_pb2 import Empty +from common.proto.simap_connector_pb2 import Subscription, SubscriptionId +from common.method_wrappers.ServiceExceptions import NotFoundException +from .models.SubscriptionModel import SubscriptionModel + + +LOGGER = logging.getLogger(__name__) + + +def get_random_uuid() -> str: + return str(uuid.uuid4()) + +def subscription_get(db_engine : Engine, request : SubscriptionId) -> Subscription: + subscription_id = request.subscription_id + def callback(session : Session) -> Optional[Dict]: + obj : Optional[SubscriptionModel] = ( + session + .query(SubscriptionModel) + .filter_by(subscription_id=subscription_id) + .one_or_none() + ) + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + raise NotFoundException('Subscription', str(subscription_id)) + return Subscription(**obj) + +def subscription_set(db_engine : Engine, request : Subscription) -> SubscriptionId: + subscription_uuid = get_random_uuid() + + now = datetime.datetime.now(datetime.timezone.utc) + + subscription_data = [{ + 'subscription_uuid': subscription_uuid, + #'subscription_id' : subscription_id, + 'datastore' : request.datastore, + 'filter_xpath' : request.filter_xpath, + 'sampling_interval': request.sampling_interval, + 'created_at' : now, + 'updated_at' : now, + }] + + def callback(session : Session) -> Tuple[bool, int]: + stmt = insert(SubscriptionModel).values(subscription_data) + stmt = stmt.on_conflict_do_update( + index_elements=[SubscriptionModel.subscription_uuid], + set_=dict( + datastore = stmt.excluded.datastore, + filter_xpath = stmt.excluded.filter_xpath, + sampling_interval = stmt.excluded.sampling_interval, + updated_at = stmt.excluded.updated_at, + ) + ) + stmt = stmt.returning( + SubscriptionModel.created_at, SubscriptionModel.updated_at, + SubscriptionModel.subscription_id + ) + created_at,updated_at,subscription_id = session.execute(stmt).fetchone() + return updated_at > created_at, subscription_id + + _,subscription_id = run_transaction(sessionmaker(bind=db_engine), callback) + return SubscriptionId(subscription_id=subscription_id) + +def subscription_delete(db_engine : Engine, request : SubscriptionId) -> Empty: + subscription_id = request.subscription_id + def callback(session : Session) -> bool: + num_deleted = session.query(SubscriptionModel).filter_by(subscription_id=subscription_id).delete() + return num_deleted > 0 + run_transaction(sessionmaker(bind=db_engine), callback) + return Empty() diff --git a/src/simap_connector/service/database/__init__.py b/src/simap_connector/service/database/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/service/database/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/simap_connector/service/database/models/SubscriptionModel.py b/src/simap_connector/service/database/models/SubscriptionModel.py new file mode 100644 index 000000000..6a506149f --- /dev/null +++ b/src/simap_connector/service/database/models/SubscriptionModel.py @@ -0,0 +1,41 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from sqlalchemy import Column, DateTime, String +from sqlalchemy import Column, Float, Integer, String +from sqlalchemy.dialects.postgresql import UUID +from typing import Dict +from ._Base import _Base + + +class SubscriptionModel(_Base): + __tablename__ = 'subscription' + + subscription_uuid = Column(UUID(as_uuid=False), primary_key=True) + subscription_id = Column(Integer, nullable=False, unique=True, autoincrement=True) + datastore = Column(String, nullable=False, unique=False) + filter_xpath = Column(String, nullable=False, unique=False) + sampling_interval = Column(Float, nullable=False, unique=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) + + def dump_id(self) -> Dict: + return {'subscription_uuid': {'uuid': self.subscription_uuid}} + + def dump(self) -> Dict: + return { + 'datastore' : self.datastore, + 'filter_xpath' : self.filter_xpath, + 'sampling_interval': self.sampling_interval, + } diff --git a/src/simap_connector/service/database/models/_Base.py b/src/simap_connector/service/database/models/_Base.py new file mode 100644 index 000000000..a67909275 --- /dev/null +++ b/src/simap_connector/service/database/models/_Base.py @@ -0,0 +1,42 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sqlalchemy +#from typing import Any, List +#from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy.orm import declarative_base +#from sqlalchemy.sql import text +#from sqlalchemy_cockroachdb import run_transaction + +_Base = declarative_base() + +#def create_performance_enhancers(db_engine : sqlalchemy.engine.Engine) -> None: +# def index_storing( +# index_name : str, table_name : str, index_fields : List[str], storing_fields : List[str] +# ) -> Any: +# str_index_fields = ','.join(['"{:s}"'.format(index_field) for index_field in index_fields]) +# str_storing_fields = ','.join(['"{:s}"'.format(storing_field) for storing_field in storing_fields]) +# INDEX_STORING = 'CREATE INDEX IF NOT EXISTS {:s} ON "{:s}" ({:s}) STORING ({:s});' +# return text(INDEX_STORING.format(index_name, table_name, str_index_fields, str_storing_fields)) +# +# statements = [ +# ] +# def callback(session : Session) -> bool: +# for stmt in statements: session.execute(stmt) +# run_transaction(sessionmaker(bind=db_engine), callback) + +def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False): + if drop_if_exists: _Base.metadata.drop_all(db_engine) + _Base.metadata.create_all(db_engine) + #create_performance_enhancers(db_engine) diff --git a/src/simap_connector/service/database/models/__init__.py b/src/simap_connector/service/database/models/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/service/database/models/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -- GitLab From 8621a7b3f7b91869365196b1cd7174b87249468a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 16:15:46 +0000 Subject: [PATCH 279/367] NBI component - SSE Telemetry: - Updated to delegate establish/delete subscriptions to SIMAP Connector - Implemented SSE-based resource --- .../sse_telemetry/CreateSubscription.py | 169 --------------- .../sse_telemetry/DeleteSubscription.py | 173 ++++++++------- .../sse_telemetry/EstablishSubscription.py | 198 ++++++++++++++++++ .../sse_telemetry/StreamSubscription.py | 154 ++++++++++++++ src/nbi/service/sse_telemetry/__init__.py | 18 +- 5 files changed, 462 insertions(+), 250 deletions(-) delete mode 100644 src/nbi/service/sse_telemetry/CreateSubscription.py create mode 100644 src/nbi/service/sse_telemetry/EstablishSubscription.py create mode 100644 src/nbi/service/sse_telemetry/StreamSubscription.py diff --git a/src/nbi/service/sse_telemetry/CreateSubscription.py b/src/nbi/service/sse_telemetry/CreateSubscription.py deleted file mode 100644 index dcabe93e0..000000000 --- a/src/nbi/service/sse_telemetry/CreateSubscription.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import json, logging -from random import choice -from sys import warnoptions -from typing import Dict, List, Optional, Set -from uuid import uuid4 -from typing_extensions import TypedDict -from flask import jsonify, request -from flask_restful import Resource -from werkzeug.exceptions import BadRequest, NotFound, UnsupportedMediaType, InternalServerError -from common.proto.monitoring_pb2 import SSEMonitoringSubscriptionConfig -from common.tools.context_queries.Device import get_device -from common.tools.grpc.Tools import grpc_message_to_json_string -from common.proto.monitoring_pb2 import ( - SSEMonitoringSubscriptionConfig, - SSEMonitoringSubscriptionResponse, -) -from common.tools.rest_conf.client.RestConfClient import RestConfClient -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from nbi.service._tools.Authentication import HTTP_AUTH -from nbi.service.database.Engine import Engine -from nbi.service.sse_telemetry.database.Subscription import ( - SSESubsciprionDict, - list_identifiers, - set_subscription, -) -from .topology import ( - Controllers, - SubscribedNotificationsSchema, - decompose_subscription, - get_controller_name, -) - - - -class SubscriptionId(TypedDict): - identifier: str - uri: str - - -LOGGER = logging.getLogger(__name__) - - -class CreateSubscription(Resource): - # @HTTP_AUTH.login_required - def post(self): - db = Engine.get_engine() - if db is None: - LOGGER.error('Database engine is not initialized') - raise InternalServerError('Database engine is not initialized') - if not request.is_json: - LOGGER.error('JSON payload is required') - raise UnsupportedMediaType('JSON payload is required') - request_data: Optional[SubscribedNotificationsSchema] = request.json - if request_data is None: - LOGGER.error('JSON payload is required') - raise UnsupportedMediaType('JSON payload is required') - LOGGER.debug('Received subscription request data: {:s}'.format(str(request_data))) - - rest_conf_client = RestConfClient( - '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', - logger=logging.getLogger('RestConfClient') - ) - - # break the request into its abstract components for telemetry subscription - list_db_ids = list_identifiers(db) - request_identifier = str( - choice([x for x in range(1000, 10000) if x not in list_db_ids]) - ) - sub_subs = decompose_subscription(rest_conf_client, request_data) - - # subscribe to each component - device_client = DeviceClient() - context_client = ContextClient() - for s in sub_subs: - xpath_filter = s['ietf-subscribed-notifications:input'][ - 'ietf-yang-push:datastore-xpath-filter' - ] - xpath_filter_prefix = xpath_filter.split('/ietf-network-topology:link')[0] - xpath_network = rest_conf_client.get(xpath_filter_prefix) - if not xpath_network: - MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' - raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_prefix))) - networks = xpath_network.get('ietf-network:network', list()) - if len(networks) != 1: - MSG = 'Resource({:s} => {:s}) wrong number of entries: {:s}' - raise 
Exception(MSG.format( - str(xpath_filter), str(xpath_filter_prefix), str(xpath_network) - )) - network = networks[0] - network_id = network['network-id'] - - controller_name_map = { - 'e2e' : 'TFS-E2E', - 'agg' : 'TFS-AGG', - 'trans-pkt': 'TFS-IP', - 'trans-opt': 'NCE-T', - 'access' : 'NCE-FAN', - } - controller_name = controller_name_map.get(network_id) - if controller_name is None: - LOGGER.warning( - 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath_filter) - ) - continue - - #SERVICE_ID = '' - #device_controller = get_controller_name(xpath, SERVICE_ID, context_client) - #if device_controller == Controllers.CONTROLLERLESS: - # LOGGER.warning( - # 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) - # ) - # continue - - sampling_interval = s['ietf-subscribed-notifications:input'][ - 'ietf-yang-push:periodic' - ]['ietf-yang-push:period'] - - s_req = SSEMonitoringSubscriptionConfig() - #s_req.device_id.device_uuid.uuid = device_controller.value - s_req.device_id.device_uuid.uuid = controller_name - s_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe - s_req.uri = xpath_filter - s_req.sampling_interval = str(sampling_interval) - r: SSEMonitoringSubscriptionResponse = device_client.SSETelemetrySubscribe(s_req) - s = SSESubsciprionDict( - uuid=str(uuid4()), - identifier=r.identifier, - uri=r.uri, - xpath=xpath_filter, - sampling_interval=sampling_interval, - main_subscription=False, - main_subscription_id=request_identifier, - ) - _ = set_subscription(db, s) - - # save the main subscription to the database - r_uri = f'/restconf/data/subscriptions/{request_identifier}' - s = SSESubsciprionDict( - uuid=str(uuid4()), - identifier=request_identifier, - uri=r_uri, - xpath=request_data['ietf-subscribed-notifications:input'][ - 'ietf-yang-push:datastore-xpath-filter' - ], - sampling_interval=sampling_interval, - main_subscription=True, - main_subscription_id=None, - ) - _ = set_subscription(db, s) - - # Return the subscription ID - sub_id = SubscriptionId(identifier=request_identifier, uri=r_uri) - return jsonify(sub_id) diff --git a/src/nbi/service/sse_telemetry/DeleteSubscription.py b/src/nbi/service/sse_telemetry/DeleteSubscription.py index d68fef4bc..2c1f7be9c 100644 --- a/src/nbi/service/sse_telemetry/DeleteSubscription.py +++ b/src/nbi/service/sse_telemetry/DeleteSubscription.py @@ -14,28 +14,30 @@ import logging -from typing import Optional +#from typing import Optional from flask import jsonify, request from flask_restful import Resource -from werkzeug.exceptions import NotFound, InternalServerError, UnsupportedMediaType -from common.proto.monitoring_pb2 import ( - SSEMonitoringSubscriptionConfig, - SSEMonitoringSubscriptionResponse, -) -from device.client.DeviceClient import DeviceClient -from context.client.ContextClient import ContextClient -from nbi.service._tools.Authentication import HTTP_AUTH -from nbi.service.database.Engine import Engine -from nbi.service.sse_telemetry.database.Subscription import ( - get_main_subscription, - get_sub_subscription, - delete_subscription, -) -from nbi.service.sse_telemetry.topology import ( - Controllers, - UnsubscribedNotificationsSchema, - get_controller_name, -) +from werkzeug.exceptions import BadRequest, UnsupportedMediaType #, NotFound, InternalServerError +from common.proto.simap_connector_pb2 import SubscriptionId +from simap_connector.client.SimapConnectorClient import SimapConnectorClient +#from common.proto.monitoring_pb2 import ( +# SSEMonitoringSubscriptionConfig, +# 
SSEMonitoringSubscriptionResponse, +#) +#from device.client.DeviceClient import DeviceClient +#from context.client.ContextClient import ContextClient +#from nbi.service._tools.Authentication import HTTP_AUTH +#from nbi.service.database.Engine import Engine +#from nbi.service.sse_telemetry.database.Subscription import ( +# get_main_subscription, +# get_sub_subscription, +# delete_subscription, +#) +#from nbi.service.sse_telemetry.topology import ( +# Controllers, +# UnsubscribedNotificationsSchema, +# get_controller_name, +#) LOGGER = logging.getLogger(__name__) @@ -44,69 +46,86 @@ LOGGER = logging.getLogger(__name__) class DeleteSubscription(Resource): # @HTTP_AUTH.login_required def post(self): - db = Engine.get_engine() - if db is None: - LOGGER.error('Database engine is not initialized') - raise InternalServerError('Database engine is not initialized') +# db = Engine.get_engine() +# if db is None: +# LOGGER.error('Database engine is not initialized') +# raise InternalServerError('Database engine is not initialized') + if not request.is_json: - LOGGER.error('JSON payload is required') - raise UnsupportedMediaType('JSON payload is required') - request_data: Optional[UnsubscribedNotificationsSchema] = request.json - if request_data is None: - LOGGER.error('JSON payload is required') +# LOGGER.error('JSON payload is required') raise UnsupportedMediaType('JSON payload is required') - main_subscription_id = request_data['delete-subscription']['identifier'] - LOGGER.debug( - 'Received delete subscription request for ID: {:s}'.format(main_subscription_id) - ) - - # Get the main subscription - main_subscription = get_main_subscription(db, main_subscription_id) - if main_subscription is None: - LOGGER.error('Subscription not found: {:s}'.format(main_subscription_id)) - raise NotFound('Subscription not found') - - # Get all sub-subscriptions associated with this main subscription - sub_subscriptions = get_sub_subscription(db, main_subscription_id) - - device_client = DeviceClient() - context_client = ContextClient() - # Unsubscribe from each sub-subscription - for sub_sub in sub_subscriptions: - # Create unsubscribe request - SERVICE_ID = '' - device_controller = get_controller_name(sub_sub['xpath'], SERVICE_ID, context_client) - if device_controller == Controllers.CONTROLLERLESS: - LOGGER.warning( - 'Controllerless device detected, skipping subscription for: {:s}'.format( - sub_sub['xpath'] - ) - ) - continue - unsub_req = SSEMonitoringSubscriptionConfig() - unsub_req.device_id.device_uuid.uuid = device_controller.value - unsub_req.config_type = SSEMonitoringSubscriptionConfig.Unsubscribe - unsub_req.uri = sub_sub['xpath'] - unsub_req.identifier = sub_sub['identifier'] + request_data = request.json + LOGGER.debug('[post] Unsubscription request: {:s}'.format(str(request_data))) +# if request_data is None: +# LOGGER.error('JSON payload is required') +# raise UnsupportedMediaType('JSON payload is required') - # Send unsubscribe request to device - device_client.SSETelemetrySubscribe(unsub_req) + if 'ietf-subscribed-notifications:input' not in request_data: + raise BadRequest('Missing field(ietf-subscribed-notifications:input)') + input_data = request_data['ietf-subscribed-notifications:input'] - delete_subscription(db, sub_sub['identifier'], False) + subscription_id = SubscriptionId() - LOGGER.info('Unsubscribed from {:s} successfully'.format(sub_sub.get('uri', ''))) + if 'id' not in input_data: + raise BadRequest('Missing field(ietf-subscribed-notifications:input/id)') + 
subscription_id.subscription_id = input_data['id'] - # Delete the main subscription from database - delete_subscription(db, main_subscription_id, True) - - LOGGER.info('Successfully deleted main subscription: {:s}'.format(main_subscription_id)) - - #if SERVICE_ID == 'simap1': - # SERVICE_ID = 'simap2' - #elif SERVICE_ID == 'simap2': - # SERVICE_ID = 'simap1' - #else: - # LOGGER.warning('Unknown service ID, not switching: {:s}'.format(SERVICE_ID)) + simap_connector_client = SimapConnectorClient() + simap_connector_client.DeleteSubscription(subscription_id) +# main_subscription_id = request_data['delete-subscription']['identifier'] +# LOGGER.debug( +# 'Received delete subscription request for ID: {:s}'.format(main_subscription_id) +# ) +# +# # Get the main subscription +# main_subscription = get_main_subscription(db, main_subscription_id) +# if main_subscription is None: +# LOGGER.error('Subscription not found: {:s}'.format(main_subscription_id)) +# raise NotFound('Subscription not found') +# +# # Get all sub-subscriptions associated with this main subscription +# sub_subscriptions = get_sub_subscription(db, main_subscription_id) +# +# device_client = DeviceClient() +# context_client = ContextClient() +# +# # Unsubscribe from each sub-subscription +# for sub_sub in sub_subscriptions: +# # Create unsubscribe request +# SERVICE_ID = '' +# device_controller = get_controller_name(sub_sub['xpath'], SERVICE_ID, context_client) +# if device_controller == Controllers.CONTROLLERLESS: +# LOGGER.warning( +# 'Controllerless device detected, skipping subscription for: {:s}'.format( +# sub_sub['xpath'] +# ) +# ) +# continue +# unsub_req = SSEMonitoringSubscriptionConfig() +# unsub_req.device_id.device_uuid.uuid = device_controller.value +# unsub_req.config_type = SSEMonitoringSubscriptionConfig.Unsubscribe +# unsub_req.uri = sub_sub['xpath'] +# unsub_req.identifier = sub_sub['identifier'] +# +# # Send unsubscribe request to device +# device_client.SSETelemetrySubscribe(unsub_req) +# +# delete_subscription(db, sub_sub['identifier'], False) +# +# LOGGER.info('Unsubscribed from {:s} successfully'.format(sub_sub.get('uri', ''))) +# +# # Delete the main subscription from database +# delete_subscription(db, main_subscription_id, True) +# +# LOGGER.info('Successfully deleted main subscription: {:s}'.format(main_subscription_id)) +# +# #if SERVICE_ID == 'simap1': +# # SERVICE_ID = 'simap2' +# #elif SERVICE_ID == 'simap2': +# # SERVICE_ID = 'simap1' +# #else: +# # LOGGER.warning('Unknown service ID, not switching: {:s}'.format(SERVICE_ID)) +# return jsonify({}) diff --git a/src/nbi/service/sse_telemetry/EstablishSubscription.py b/src/nbi/service/sse_telemetry/EstablishSubscription.py new file mode 100644 index 000000000..c9b553e48 --- /dev/null +++ b/src/nbi/service/sse_telemetry/EstablishSubscription.py @@ -0,0 +1,198 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging #, json +#from random import choice +#from typing import Dict, List, Optional, Set +#from uuid import uuid4 +#from typing_extensions import TypedDict +from flask import jsonify, request, url_for +from flask_restful import Resource +from werkzeug.exceptions import BadRequest, UnsupportedMediaType #, NotFound, InternalServerError +from common.proto.simap_connector_pb2 import Subscription #, SubscriptionId +from simap_connector.client.SimapConnectorClient import SimapConnectorClient +#from common.proto.monitoring_pb2 import SSEMonitoringSubscriptionConfig +#from common.tools.context_queries.Device import get_device +#from common.tools.grpc.Tools import grpc_message_to_json_string +#from common.proto.monitoring_pb2 import ( +# SSEMonitoringSubscriptionConfig, +# SSEMonitoringSubscriptionResponse, +#) +#from common.tools.rest_conf.client.RestConfClient import RestConfClient +#from context.client.ContextClient import ContextClient +#from device.client.DeviceClient import DeviceClient +#from nbi.service._tools.Authentication import HTTP_AUTH +#from nbi.service.database.Engine import Engine +#from nbi.service.sse_telemetry.database.Subscription import ( +# SSESubsciprionDict, +# list_identifiers, +# set_subscription, +#) +#from .topology import ( +# Controllers, +# SubscribedNotificationsSchema, +# decompose_subscription, +# get_controller_name, +#) + + + +#class SubscriptionId(TypedDict): +# identifier: str +# uri: str + + +LOGGER = logging.getLogger(__name__) + + +class CreateSubscription(Resource): + # @HTTP_AUTH.login_required + def post(self): + if not request.is_json: + raise UnsupportedMediaType('JSON payload is required') + + request_data = request.json + LOGGER.debug('[post] Subscription request: {:s}'.format(str(request_data))) + + if 'ietf-subscribed-notifications:input' not in request_data: + raise BadRequest('Missing field(ietf-subscribed-notifications:input)') + input_data = request_data['ietf-subscribed-notifications:input'] + + subscription = Subscription() + + if 'datastore' not in input_data: + raise BadRequest('Missing field(ietf-subscribed-notifications:input/datastore)') + subscription.datastore = input_data['datastore'] + + if 'ietf-yang-push:datastore-xpath-filter' not in input_data: + raise BadRequest('Missing field(ietf-subscribed-notifications:input/ietf-yang-push:datastore-xpath-filter)') + subscription.xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] + + if 'ietf-yang-push:periodic' not in input_data: + raise BadRequest('Missing field(ietf-subscribed-notifications:input/ietf-yang-push:periodic)') + periodic = input_data['ietf-yang-push:periodic'] + + if 'ietf-yang-push:period' not in periodic: + raise BadRequest('Missing field(ietf-subscribed-notifications:input/ietf-yang-push:periodic/ietf-yang-push:period)') + subscription.period = float(periodic['ietf-yang-push:period']) + + simap_connector_client = SimapConnectorClient() + subscription_id = simap_connector_client.EstablishSubscription(subscription) + subscription_id = subscription_id.subscription_id + + subscription_uri = url_for('sse.stream', subscription_id=subscription_id) + sub_id = {'identifier': subscription_id, 'uri': subscription_uri} + return jsonify(sub_id) + + +# db = Engine.get_engine() +# if db is None: +# LOGGER.error('Database engine is not initialized') +# raise InternalServerError('Database engine is not initialized') +# rest_conf_client = RestConfClient( +# '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', +# 
logger=logging.getLogger('RestConfClient') +# ) +# +# # break the request into its abstract components for telemetry subscription +# list_db_ids = list_identifiers(db) +# request_identifier = str( +# choice([x for x in range(1000, 10000) if x not in list_db_ids]) +# ) +# sub_subs = decompose_subscription(rest_conf_client, request_data) +# +# # subscribe to each component +# device_client = DeviceClient() +# context_client = ContextClient() +# for s in sub_subs: +# xpath_filter = s['ietf-subscribed-notifications:input'][ +# 'ietf-yang-push:datastore-xpath-filter' +# ] +# xpath_filter_prefix = xpath_filter.split('/ietf-network-topology:link')[0] +# xpath_network = rest_conf_client.get(xpath_filter_prefix) +# if not xpath_network: +# MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' +# raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_prefix))) +# networks = xpath_network.get('ietf-network:network', list()) +# if len(networks) != 1: +# MSG = 'Resource({:s} => {:s}) wrong number of entries: {:s}' +# raise Exception(MSG.format( +# str(xpath_filter), str(xpath_filter_prefix), str(xpath_network) +# )) +# network = networks[0] +# network_id = network['network-id'] +# +# controller_name_map = { +# 'e2e' : 'TFS-E2E', +# 'agg' : 'TFS-AGG', +# 'trans-pkt': 'TFS-IP', +# 'trans-opt': 'NCE-T', +# 'access' : 'NCE-FAN', +# } +# controller_name = controller_name_map.get(network_id) +# if controller_name is None: +# LOGGER.warning( +# 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath_filter) +# ) +# continue +# +# #SERVICE_ID = '' +# #device_controller = get_controller_name(xpath, SERVICE_ID, context_client) +# #if device_controller == Controllers.CONTROLLERLESS: +# # LOGGER.warning( +# # 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) +# # ) +# # continue +# +# sampling_interval = s['ietf-subscribed-notifications:input'][ +# 'ietf-yang-push:periodic' +# ]['ietf-yang-push:period'] +# +# s_req = SSEMonitoringSubscriptionConfig() +# #s_req.device_id.device_uuid.uuid = device_controller.value +# s_req.device_id.device_uuid.uuid = controller_name +# s_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe +# s_req.uri = xpath_filter +# s_req.sampling_interval = str(sampling_interval) +# r: SSEMonitoringSubscriptionResponse = device_client.SSETelemetrySubscribe(s_req) +# s = SSESubsciprionDict( +# uuid=str(uuid4()), +# identifier=r.identifier, +# uri=r.uri, +# xpath=xpath_filter, +# sampling_interval=sampling_interval, +# main_subscription=False, +# main_subscription_id=request_identifier, +# ) +# _ = set_subscription(db, s) +# +# # save the main subscription to the database +# r_uri = f'/restconf/data/subscriptions/{request_identifier}' +# s = SSESubsciprionDict( +# uuid=str(uuid4()), +# identifier=request_identifier, +# uri=r_uri, +# xpath=request_data['ietf-subscribed-notifications:input'][ +# 'ietf-yang-push:datastore-xpath-filter' +# ], +# sampling_interval=sampling_interval, +# main_subscription=True, +# main_subscription_id=None, +# ) +# _ = set_subscription(db, s) + +# # Return the subscription ID +# sub_id = SubscriptionId(identifier=request_identifier, uri=r_uri) +# return jsonify(sub_id) diff --git a/src/nbi/service/sse_telemetry/StreamSubscription.py b/src/nbi/service/sse_telemetry/StreamSubscription.py new file mode 100644 index 000000000..0a598d9e0 --- /dev/null +++ b/src/nbi/service/sse_telemetry/StreamSubscription.py @@ -0,0 +1,154 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, time #, asyncio, json, uuid +#from dataclasses import dataclass +#from datetime import datetime +from typing import Dict, List #, Optional, Union +from flask import Response #, jsonify, request +#from flask_restful import Resource +#from flask_sse import sse +from kafka import KafkaConsumer, TopicPartition +from kafka.admin import KafkaAdminClient, NewTopic +from kafka.consumer.fetcher import ConsumerRecord +from kafka.errors import TopicAlreadyExistsError +#from werkzeug.exceptions import NotFound, InternalServerError, UnsupportedMediaType +#from common.proto.monitoring_pb2 import ( +# SSEMonitoringSubscriptionConfig, +# SSEMonitoringSubscriptionResponse, +#) +from common.tools.kafka.Variables import KafkaConfig +#from device.client.DeviceClient import DeviceClient +#from context.client.ContextClient import ContextClient +#from nbi.service._tools.Authentication import HTTP_AUTH +#from nbi.service.database.Engine import Engine +#from nbi.service.sse_telemetry.database.Subscription import ( +# get_main_subscription, +# get_sub_subscription, +# delete_subscription, +#) +#from nbi.service.sse_telemetry.topology import ( +# Controllers, +# UnsubscribedNotificationsSchema, +# get_controller_name, +#) + + +LOGGER = logging.getLogger(__name__) + + +KAFKA_BOOT_SERVERS = KafkaConfig.get_kafka_address() + + +class StreamSubscription(Resource): + # @HTTP_AUTH.login_required + def get(self, subscription_id : int): + LOGGER.warning('[get] begin') + + #db = Engine.get_engine() + #if db is None: + # LOGGER.error('Database engine is not initialized') + # raise InternalServerError('Database engine is not initialized') + + ## Get the main subscription + #main_subscription = get_main_subscription(db, subscription_id) + #if main_subscription is None: + # MSG = 'Subscription({:s}) not found' + # msg = MSG.format(str(subscription_id)) + # LOGGER.error(msg) + # raise NotFound(description=msg) + + def event_stream(): + LOGGER.warning('[stream:event_stream] begin') + topic = 'subscription.{:s}'.format(str(subscription_id)) + + LOGGER.warning('[stream:event_stream] Checking Topics...') + kafka_admin = KafkaAdminClient(bootstrap_servers=KAFKA_BOOT_SERVERS) + existing_topics = set(kafka_admin.list_topics()) + LOGGER.warning('[stream:event_stream] existing_topics={:s}'.format(str(existing_topics))) + if topic not in existing_topics: + LOGGER.warning('[stream:event_stream] Creating Topic...') + to_create = [NewTopic(topic, num_partitions=3, replication_factor=1)] + try: + kafka_admin.create_topics(to_create, validate_only=False) + LOGGER.warning('[stream:event_stream] Topic Created') + except TopicAlreadyExistsError: + pass + + LOGGER.warning('[stream:event_stream] Connecting Consumer...') + kafka_consumer = KafkaConsumer( + bootstrap_servers = KAFKA_BOOT_SERVERS, + group_id = None, # consumer dispatch all messages sent to subscribed topics + auto_offset_reset = 'latest', + ) + LOGGER.warning('[stream:event_stream] Subscribing topic={:s}...'.format(str(topic))) 
+ kafka_consumer.subscribe(topics=[topic]) + LOGGER.warning('[stream:event_stream] Subscribed') + + while True: + LOGGER.warning('[stream:event_stream] Waiting...') + topic_records : Dict[TopicPartition, List[ConsumerRecord]] = \ + kafka_consumer.poll(timeout_ms=1000, max_records=1) + if len(topic_records) == 0: + time.sleep(0.5) + continue # no pending records + + LOGGER.warning('[stream:event_stream] topic_records={:s}'.format(str(topic_records))) + for _topic, records in topic_records.items(): + if _topic.topic != topic: continue + for record in records: + message_key = record.key.decode('utf-8') + message_value = record.value.decode('utf-8') + + MSG = '[stream:event_stream] message_key={:s} message_value={:s}' + LOGGER.warning(MSG.format(str(message_key), str(message_value))) + yield message_value + LOGGER.warning('[stream:event_stream] sent') + + LOGGER.info('[stream:event_stream] Closing...') + kafka_consumer.close() + + LOGGER.warning('[stream] ready to stream...') + return Response(event_stream(), mimetype='text/event-stream') + + #update_counter = 1 + #sampling_interval = float(main_subscription['sampling_interval']) + + #try: + # while True: + # simap_telemetry = { + # 'bandwidth-utilization': str(round(bandwidth, 2)), + # 'latency': str(round(delay, 2)), + # 'related-service-ids': [service_name], + # } + # telemetry_data = {'ietf-restconf:notification': { + # 'eventTime': datetime.utcnow().isoformat() + 'Z', + # 'push-update': { + # 'id': update_counter, + # 'datastore-contents': { + # 'simap-telemetry:simap-telemetry': simap_telemetry + # } + # } + # }} + # sse.publish(telemetry_data, id=update_counter, channel=str(subscription_id)) + # update_counter += 1 + # await asyncio.sleep(sampling_interval) + # + # # Send termination event + # sse.publish({}, id=update_counter, channel=str(subscription_id)) + # + #except Exception: + # MSG = 'Unhandled Exception event generator for Subscription({:s})' + # LOGGER.exception(MSG.format(str(subscription_id))) diff --git a/src/nbi/service/sse_telemetry/__init__.py b/src/nbi/service/sse_telemetry/__init__.py index 72b6a4fb5..20ec037ab 100644 --- a/src/nbi/service/sse_telemetry/__init__.py +++ b/src/nbi/service/sse_telemetry/__init__.py @@ -12,18 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# RFC 8299 - YANG Data Model for L3VPN Service Delivery
-# Ref: https://datatracker.ietf.org/doc/rfc8299
+
+# RFC 8639 - Subscription to YANG Notifications
+# Ref: https://datatracker.ietf.org/doc/html/rfc8639
+
+# RFC 8641 - Subscription to YANG Notifications for Datastore Updates
+# Ref: https://datatracker.ietf.org/doc/html/rfc8641
 
 from nbi.service.NbiApplication import NbiApplication
-from .CreateSubscription import CreateSubscription
+from .EstablishSubscription import EstablishSubscription
 from .DeleteSubscription import DeleteSubscription
+from .StreamSubscription import StreamSubscription
 
 
 def register_telemetry_subscription(nbi_app: NbiApplication):
     nbi_app.add_rest_api_resource(
-        CreateSubscription,
+        EstablishSubscription,
         '/restconf/operations/subscriptions:establish-subscription',
         '/restconf/operations/subscriptions:establish-subscription/',
     )
@@ -32,3 +37,8 @@ def register_telemetry_subscription(nbi_app: NbiApplication):
         '/restconf/operations/subscriptions:delete-subscription',
         '/restconf/operations/subscriptions:delete-subscription/',
     )
+    nbi_app.add_rest_api_resource(
+        StreamSubscription,
+        '/restconf/stream/<int:subscription_id>',
+        '/restconf/stream/<int:subscription_id>/',
+    )
-- 
GitLab


From 3c10a72fc579f65e5766d097670907dad725feba Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Sun, 21 Sep 2025 16:16:26 +0000
Subject: [PATCH 280/367] ECOC F5GA Telemetry Demo:
- Converted subscribe telemetry script to python

---
 .../subscribe_telemetry_slice1.py             | 71 +++++++++++++++++++
 1 file changed, 71 insertions(+)
 create mode 100644 src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py

diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py b/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py
new file mode 100644
index 000000000..b4f87873e
--- /dev/null
+++ b/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py
@@ -0,0 +1,71 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import requests, websocket
+
+
+RESTCONF_ADDRESS = '0.0.0.0'
+RESTCONF_PORT = 80
+TARGET_SIMAP_NAME = 'e2e'
+TARGET_LINK_NAME = 'E2E-L1'
+SAMPLING_INTERVAL = 10.0
+
+
+SUBSCRIBE_URI = '/restconf/operations/subscriptions:establish-subscription'
+SUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, SUBSCRIBE_URI)
+XPATH_FILTER = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry'
+REQUEST = {
+    'ietf-subscribed-notifications:input': {
+        'datastore': 'operational',
+        'ietf-yang-push:datastore-xpath-filter': XPATH_FILTER.format(TARGET_SIMAP_NAME, TARGET_LINK_NAME),
+        'ietf-yang-push:periodic': {
+            'ietf-yang-push:period': SAMPLING_INTERVAL
+        }
+    }
+}
+
+
+def on_open(ws):
+    print('### Opened stream ###')
+
+def on_message(ws, message):
+    print(message)
+
+def on_error(ws, error):
+    print(error)
+
+def on_close(ws, close_status_code, close_msg):
+    print('### Closed stream ###')
+
+def main() -> None:
+    print('[E2E] Subscribe Telemetry slice1...')
+    reply = requests.post(SUBSCRIBE_URL, json=REQUEST, allow_redirects=True)
+    assert reply.status_code == 200
+    reply_data = reply.json()
+
+    if 'uri' not in reply_data:
+        raise Exception('Unexpected Reply: {:s}'.format(str(reply_data)))
+    subscription_uri = reply_data['uri']
+
+    stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri)
+    print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url))
+
+    ws = websocket.WebSocketApp(
+        stream_url, on_open=on_open, on_message=on_message, on_error=on_error, on_close=on_close
+    )
+    ws.run_forever()
+
+if __name__ == '__main__':
+    main()
-- 
GitLab


From 160199664f43f6852e46cc50a4ffc971949d2936 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Sun, 21 Sep 2025 16:22:24 +0000
Subject: [PATCH 281/367] NBI component - SSE Telemetry:
- Reverting old flask-sse library

---
 src/nbi/Config.py                 | 7 -------
 src/nbi/requirements.in           | 6 ++----
 src/nbi/service/NbiApplication.py | 5 +----
 3 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/src/nbi/Config.py b/src/nbi/Config.py
index 8df9a7a3d..14f12af68 100644
--- a/src/nbi/Config.py
+++ b/src/nbi/Config.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 
 from werkzeug.security import generate_password_hash
-from common.Constants import ServiceNameEnum
-from common.Settings import get_service_host, get_setting
 
 # REST-API users
 RESTAPI_USERS = { # TODO: implement a database of credentials and permissions
@@ -23,8 +21,3 @@ RESTAPI_USERS = { # TODO: implement a database of credentials and permissions
 
 # Rebuild using: "python -c 'import secrets; print(secrets.token_hex())'"
 SECRET_KEY = '2b8ab76763d81f7bced786de8ba40bd67eea6ff79217a711eb5f8d1f19c145c1'
-
-redis_host = get_service_host(ServiceNameEnum.NBI)
-redis_port = 6379
-redis_pass = get_setting('REDIS_PASSWORD')
-REDIS_URL = 'redis://:{:s}@{:s}:{:d}'.format(redis_pass, redis_host, redis_port)
diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in
index 73c9df815..72ca62b1e 100644
--- a/src/nbi/requirements.in
+++ b/src/nbi/requirements.in
@@ -12,18 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-asgiref==3.9.* deepdiff==6.7.* deepmerge==1.1.* eventlet==0.39.0 -Flask[async]==2.1.3 +Flask==2.1.3 Flask-HTTPAuth==4.5.0 Flask-RESTful==0.3.9 flask-socketio==5.5.1 -flask-sse==1.0.0 #gevent==24.11.1 #gevent-websocket==0.10.1 -greenlet==3.1.* +#greenlet==3.1.1 gunicorn==23.0.0 jsonschema==4.4.0 kafka-python==2.0.6 diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index caed3ce1a..8d9e7a879 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -18,9 +18,8 @@ from typing import Any, List, Optional, Tuple from flask import Flask, request from flask_restful import Api, Resource from flask_socketio import Namespace, SocketIO -from flask_sse import sse from common.tools.kafka.Variables import KafkaConfig, KafkaTopic -from nbi.Config import REDIS_URL, SECRET_KEY +from nbi.Config import SECRET_KEY from nbi.service.database.base import rebuild_database from .database.Engine import Engine @@ -42,8 +41,6 @@ class NbiApplication: self._app = Flask(__name__) self._app.config['SECRET_KEY'] = SECRET_KEY - self._app.config['REDIS_URL'] = REDIS_URL - self._app.register_blueprint(sse, url_prefix='/restconf/stream') self._app.after_request(log_request) self._api = Api(self._app, prefix=base_url) -- GitLab From f51c73eda7ef2493da641fcedba2ed0e48169e5e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 16:22:46 +0000 Subject: [PATCH 282/367] NBI component - SSE Telemetry: - Adding dependency on SIMAP Connector to Dockerfile --- src/nbi/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile index 63556432b..f4957f34a 100644 --- a/src/nbi/Dockerfile +++ b/src/nbi/Dockerfile @@ -85,6 +85,8 @@ COPY src/qkd_app/__init__.py qkd_app/__init__.py COPY src/qkd_app/client/. qkd_app/client/ COPY src/qos_profile/__init__.py qos_profile/__init__.py COPY src/qos_profile/client/. qos_profile/client/ +COPY src/simap_connector/__init__.py simap_connector/__init__.py +COPY src/simap_connector/client/. simap_connector/client/ COPY src/vnt_manager/__init__.py vnt_manager/__init__.py COPY src/vnt_manager/client/. vnt_manager/client/ RUN mkdir -p /var/teraflow/tests/tools -- GitLab From a5fc98d150d188ca19351a5176838f358a544eda Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 16:26:12 +0000 Subject: [PATCH 283/367] Simap Connector: - Added python dependencies for Kafka and CockroachDB --- src/simap_connector/requirements.in | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/simap_connector/requirements.in b/src/simap_connector/requirements.in index 5c92783a2..180c1f034 100644 --- a/src/simap_connector/requirements.in +++ b/src/simap_connector/requirements.in @@ -13,3 +13,8 @@ # limitations under the License. 
requests==2.27.* +kafka-python==2.0.* +psycopg2-binary==2.9.* +SQLAlchemy==1.4.* +sqlalchemy-cockroachdb==1.4.* +SQLAlchemy-Utils==0.38.* -- GitLab From 4e077a5422b2392689a8540fdaf2e66494d8630a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 16:33:11 +0000 Subject: [PATCH 284/367] NBI component - SSE Telemetry: - Fix name of EstablishSubscription resource --- src/nbi/service/sse_telemetry/EstablishSubscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/EstablishSubscription.py b/src/nbi/service/sse_telemetry/EstablishSubscription.py index c9b553e48..9bb68fcf4 100644 --- a/src/nbi/service/sse_telemetry/EstablishSubscription.py +++ b/src/nbi/service/sse_telemetry/EstablishSubscription.py @@ -57,7 +57,7 @@ from simap_connector.client.SimapConnectorClient import SimapConnectorClient LOGGER = logging.getLogger(__name__) -class CreateSubscription(Resource): +class EstablishSubscription(Resource): # @HTTP_AUTH.login_required def post(self): if not request.is_json: -- GitLab From 809e5a310bd64f84bddbdb6c3b0260e5fad2e6fc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Sun, 21 Sep 2025 16:39:30 +0000 Subject: [PATCH 285/367] NBI component - SSE Telemetry: - Add missing import in StreamSubscription --- src/nbi/service/sse_telemetry/StreamSubscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/StreamSubscription.py b/src/nbi/service/sse_telemetry/StreamSubscription.py index 0a598d9e0..a92d5b150 100644 --- a/src/nbi/service/sse_telemetry/StreamSubscription.py +++ b/src/nbi/service/sse_telemetry/StreamSubscription.py @@ -18,7 +18,7 @@ import logging, time #, asyncio, json, uuid #from datetime import datetime from typing import Dict, List #, Optional, Union from flask import Response #, jsonify, request -#from flask_restful import Resource +from flask_restful import Resource #from flask_sse import sse from kafka import KafkaConsumer, TopicPartition from kafka.admin import KafkaAdminClient, NewTopic -- GitLab From c5f4d3134bbdabd1baa986db9bb0a618080b3816 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 11:55:23 +0000 Subject: [PATCH 286/367] Proto: - Update subscription_id to be 64 bit integer --- proto/simap_connector.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/proto/simap_connector.proto b/proto/simap_connector.proto index 19ed89751..3b966dbf6 100644 --- a/proto/simap_connector.proto +++ b/proto/simap_connector.proto @@ -25,7 +25,7 @@ service SimapConnectorService { } message SubscriptionId { - uint32 subscription_id = 1; + uint64 subscription_id = 1; } message Subscription { -- GitLab From 67b1e3b64d688706fea2a26a4125c77b0276605c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 11:55:52 +0000 Subject: [PATCH 287/367] Manifests: - Removed Redis from NBI --- manifests/nbiservice.yaml | 31 ------------------------------- 1 file changed, 31 deletions(-) diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index 0cf45401f..ec6db58b7 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -56,11 +56,6 @@ spec: value: "tfs123" - name: CRDB_SSLMODE value: "require" - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - key: REDIS_PASSWORD envFrom: - secretRef: name: kfk-kpi-data @@ -85,28 +80,6 @@ spec: limits: cpu: 1000m memory: 2048Mi - - name: redis - image: redis:7.0-alpine - env: - - name: REDIS_PASSWORD - valueFrom: - secretKeyRef: - name: redis-secrets - key: 
REDIS_PASSWORD - ports: - - containerPort: 6379 - name: client - command: ["redis-server"] - args: - - --requirepass - - $(REDIS_PASSWORD) - resources: - requests: - cpu: 50m - memory: 64Mi - limits: - cpu: 500m - memory: 512Mi --- apiVersion: v1 kind: Service @@ -130,7 +103,3 @@ spec: # protocol: TCP # port: 9192 # targetPort: 9192 - - name: redis - protocol: TCP - port: 6379 - targetPort: 6379 -- GitLab From 0b6c46f0d874ef35b9b551f0b14d98150c7d26b0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 11:57:48 +0000 Subject: [PATCH 288/367] NBI component - SSE Telemetry: - Updated endpoint names --- src/nbi/service/sse_telemetry/__init__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/nbi/service/sse_telemetry/__init__.py b/src/nbi/service/sse_telemetry/__init__.py index 20ec037ab..5aa699588 100644 --- a/src/nbi/service/sse_telemetry/__init__.py +++ b/src/nbi/service/sse_telemetry/__init__.py @@ -31,14 +31,17 @@ def register_telemetry_subscription(nbi_app: NbiApplication): EstablishSubscription, '/restconf/operations/subscriptions:establish-subscription', '/restconf/operations/subscriptions:establish-subscription/', + endpoint='sse.establish', ) nbi_app.add_rest_api_resource( DeleteSubscription, '/restconf/operations/subscriptions:delete-subscription', '/restconf/operations/subscriptions:delete-subscription/', + endpoint='sse.delete', ) nbi_app.add_rest_api_resource( StreamSubscription, '/restconf/stream/', '/restconf/stream//', + endpoint='sse.stream', ) -- GitLab From acf4d381c95f9ab94f5d4e05556adb9a503d8f45 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 12:00:11 +0000 Subject: [PATCH 289/367] Simap Connector: - Updated database model fields and added sub-subscription model - Generalized telemetry pool to include collectors, aggregators, and synthesizers - Implemented Aggregator and Collector workers - Implemented logic for establish/delete subscriptions --- .../service/SimapConnectorService.py | 9 +- .../SimapConnectorServiceServicerImpl.py | 108 +++++++++++- src/simap_connector/service/Tools.py | 156 ++++++++++++++++++ src/simap_connector/service/__main__.py | 12 +- .../service/database/SubSubscription.py | 112 +++++++++++++ .../service/database/Subscription.py | 71 ++++---- .../database/models/SubSubscriptionModel.py | 48 ++++++ .../database/models/SubscriptionModel.py | 26 +-- .../service/simap_updater/SimapUpdater.py | 10 +- .../service/telemetry/TelemetryPool.py | 102 +++++++++--- .../telemetry/worker/AggregatorWorker.py | 112 +++++++++++++ .../telemetry/worker/CollectorWorker.py | 120 ++++++++++++++ .../SynthesizerWorker.py} | 34 +--- .../service/telemetry/worker/_Worker.py | 57 +++++++ .../service/telemetry/worker/__init__.py | 13 ++ .../telemetry/worker/data/AggregationCache.py | 61 +++++++ .../telemetry/{ => worker/data}/Resources.py | 3 +- .../service/telemetry/worker/data/Sample.py | 23 +++ .../{ => worker/data}/SyntheticSamplers.py | 9 +- .../service/telemetry/worker/data/__init__.py | 13 ++ 20 files changed, 975 insertions(+), 124 deletions(-) create mode 100644 src/simap_connector/service/Tools.py create mode 100644 src/simap_connector/service/database/SubSubscription.py create mode 100644 src/simap_connector/service/database/models/SubSubscriptionModel.py create mode 100644 src/simap_connector/service/telemetry/worker/AggregatorWorker.py create mode 100644 src/simap_connector/service/telemetry/worker/CollectorWorker.py rename src/simap_connector/service/telemetry/{TelemetryWorker.py => worker/SynthesizerWorker.py} (61%) create 
mode 100644 src/simap_connector/service/telemetry/worker/_Worker.py create mode 100644 src/simap_connector/service/telemetry/worker/__init__.py create mode 100644 src/simap_connector/service/telemetry/worker/data/AggregationCache.py rename src/simap_connector/service/telemetry/{ => worker/data}/Resources.py (96%) create mode 100644 src/simap_connector/service/telemetry/worker/data/Sample.py rename src/simap_connector/service/telemetry/{ => worker/data}/SyntheticSamplers.py (96%) create mode 100644 src/simap_connector/service/telemetry/worker/data/__init__.py diff --git a/src/simap_connector/service/SimapConnectorService.py b/src/simap_connector/service/SimapConnectorService.py index 94457397e..fc960f214 100644 --- a/src/simap_connector/service/SimapConnectorService.py +++ b/src/simap_connector/service/SimapConnectorService.py @@ -18,7 +18,9 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc from common.proto.simap_connector_pb2 import DESCRIPTOR as SIMAP_CONNECTOR_DESCRIPTOR from common.proto.simap_connector_pb2_grpc import add_SimapConnectorServiceServicer_to_server +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.tools.service.GenericGrpcService import GenericGrpcService +from .telemetry.TelemetryPool import TelemetryPool from .SimapConnectorServiceServicerImpl import SimapConnectorServiceServicerImpl @@ -27,11 +29,14 @@ LOGGER = logging.getLogger(__name__) class SimapConnectorService(GenericGrpcService): def __init__( - self, db_engine : sqlalchemy.engine.Engine, cls_name : str = __name__ + self, db_engine : sqlalchemy.engine.Engine, restconf_client : RestConfClient, + telemetry_pool : TelemetryPool, cls_name : str = __name__ ) -> None: port = get_service_port_grpc(ServiceNameEnum.SIMAP_CONNECTOR) super().__init__(port, cls_name=cls_name) - self.simap_connector_servicer = SimapConnectorServiceServicerImpl(db_engine) + self.simap_connector_servicer = SimapConnectorServiceServicerImpl( + db_engine, restconf_client, telemetry_pool + ) def install_servicers(self): add_SimapConnectorServiceServicer_to_server(self.simap_connector_servicer, self.server) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 49dcaf704..7ee0d0a87 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -14,12 +14,23 @@ import grpc, logging, sqlalchemy +from typing import List from common.proto.context_pb2 import Empty from common.proto.simap_connector_pb2 import Subscription, SubscriptionId from common.proto.simap_connector_pb2_grpc import SimapConnectorServiceServicer +from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method -from .database.Subscription import subscription_set, subscription_delete - +from device.client.DeviceClient import DeviceClient +from .database.Subscription import subscription_get, subscription_set, subscription_delete +from .database.SubSubscription import ( + sub_subscription_list, sub_subscription_set, sub_subscription_delete +) +from .telemetry.worker.data.AggregationCache import AggregationCache +from .telemetry.TelemetryPool import TelemetryPool +from .Tools import ( + SupportingLink, create_kafka_topic, delete_kafka_topic, delete_underlay_subscription, + discover_supporting_links, establish_underlay_subscription, 
get_controller_id, +) LOGGER = logging.getLogger(__name__) @@ -27,17 +38,100 @@ METRICS_POOL = MetricsPool('SimapConnector', 'RPC') class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): - def __init__(self, db_engine : sqlalchemy.engine.Engine) -> None: + def __init__( + self, db_engine : sqlalchemy.engine.Engine, restconf_client : RestConfClient, + telemetry_pool : TelemetryPool + ) -> None: LOGGER.debug('Creating Servicer...') - self.db_engine = db_engine + self._db_engine = db_engine + self._restconf_client = restconf_client + self._telemetry_pool = telemetry_pool LOGGER.debug('Servicer Created') + def _get_metrics(self) -> MetricsPool: return METRICS_POOL + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def EstablishSubscription(self, request : Subscription, context : grpc.ServicerContext) -> SubscriptionId: - return subscription_set(self.db_engine, request) + def EstablishSubscription( + self, request : Subscription, context : grpc.ServicerContext + ) -> SubscriptionId: + datastore = request.datastore + xpath_filter = request.xpath_filter + period = request.period + supporting_links : List[SupportingLink] = discover_supporting_links( + self._restconf_client, xpath_filter + ) + + parent_subscription_uuid, parent_subscription_id = subscription_set( + self._db_engine, datastore, xpath_filter, period + ) + + aggregation_cache = AggregationCache() + + device_client = DeviceClient() + sup_link_xpath_filters : List[str] = list() + for supporting_link in supporting_links: + controller_id = get_controller_id(supporting_link.network_id) + sup_link_xpath_filter = supporting_link.get_xpath_filter() + sup_link_xpath_filters.append(sup_link_xpath_filter) + + if controller_id is not None: + underlay_sub_id = establish_underlay_subscription( + device_client, controller_id, sup_link_xpath_filter, period + ) + + collector_name = '{:s}:{:s}'.format( + controller_id, str(underlay_sub_id.subscription_id) + ) + self._telemetry_pool.start_collector( + collector_name, underlay_sub_id.subscription_id, controller_id, + underlay_sub_id.subscription_uri, aggregation_cache, period + ) + + sub_request = Subscription() + sub_request.datastore = datastore + sub_request.xpath_filter = sup_link_xpath_filter + sub_request.period = period + sub_subscription_set( + self._db_engine, parent_subscription_uuid, controller_id, datastore, + sup_link_xpath_filter, period, underlay_sub_id.subscription_id, + underlay_sub_id.subscription_uri + ) + + topic = 'subscription.{:d}'.format(parent_subscription_id) + create_kafka_topic(topic) + + aggregator_name = '{:s}:{:s}'.format( + controller_id, str(parent_subscription_id) + ) + self._telemetry_pool.start_aggregator( + aggregator_name, parent_subscription_id, aggregation_cache, topic, period + ) + + return SubscriptionId(subscription_id=parent_subscription_id) + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def DeleteSubscription(self, request : SubscriptionId, context : grpc.ServicerContext) -> Empty: - return subscription_delete(self.db_engine, request) + parent_subscription_id = request.subscription_id + subscription = subscription_get(self._db_engine, parent_subscription_id) + if subscription is None: return Empty() + + # TODO: desactivate subscription aggregator and collectors + + topic = 'subscription.{:d}'.format(parent_subscription_id) + delete_kafka_topic(topic) + + parent_subscription_uuid = subscription['subscription_uuid'] + + device_client = DeviceClient() + sub_subscriptions = sub_subscription_list(self._db_engine, parent_subscription_uuid) 
+ for sub_subscription in sub_subscriptions: + sub_subscription_id = sub_subscription['sub_subscription_id'] + controller_id = sub_subscription['controller_uuid' ] + delete_underlay_subscription(device_client, controller_id, sub_subscription_id) + sub_subscription_delete(self._db_engine, parent_subscription_uuid, sub_subscription_id) + + subscription_delete(self._db_engine, parent_subscription_id) + return Empty() diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py new file mode 100644 index 000000000..e4ce34fe0 --- /dev/null +++ b/src/simap_connector/service/Tools.py @@ -0,0 +1,156 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from dataclasses import dataclass +from kafka.admin import KafkaAdminClient, NewTopic +from kafka.errors import BrokerResponseError +from typing import List, Optional +from common.proto.monitoring_pb2 import ( + SSEMonitoringSubscriptionConfig, SSEMonitoringSubscriptionResponse +) +from common.tools.kafka.Variables import KafkaConfig +from common.tools.rest_conf.client.RestConfClient import RestConfClient +from device.client.DeviceClient import DeviceClient + + +LOGGER = logging.getLogger(__name__) + + +XPATH_LINK_TEMPLATE = ( + '/ietf-network:networks/network={:s}' + '/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' +) + +@dataclass +class SupportingLink: + network_id : str + link_id : str + + def get_xpath_filter(self) -> str: + return XPATH_LINK_TEMPLATE.format(self.network_id, self.link_id) + + +def discover_supporting_links(restconf_client : RestConfClient, xpath_filter : str) -> List[SupportingLink]: + xpath_filter_2 = xpath_filter.replace('/simap-telemetry:simap-telemetry', '') + xpath_filter_2 = xpath_filter.replace('/simap-telemetry', '') + xpath_data = restconf_client.get(xpath_filter_2) + if not xpath_data: + MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' + raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_2))) + + links = xpath_data.get('ietf-network-topology:link', list()) + if len(links) == 0: + raise Exception('Link({:s}) not found'.format(str(xpath_filter_2))) + if len(links) > 1: + raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter_2))) + link = links[0] + supporting_links = link.get('supporting-link', list()) + if len(supporting_links) == 0: + MSG = 'No supporting links found for Resource({:s}, {:s})' + raise Exception(MSG.format(str(xpath_filter), str(xpath_data))) + + supporting_link_xpaths : List[SupportingLink] = [ + SupportingLink(sup_link['network-ref'], sup_link['link-ref']) + for sup_link in supporting_links + ] + return supporting_link_xpaths + + +#def compose_establish_subscription(datastore : str, xpath_filter : str, period : float) -> Dict: +# return { +# 'ietf-subscribed-notifications:input': { +# 'datastore': datastore, +# 'ietf-yang-push:datastore-xpath-filter': xpath_filter, +# 'ietf-yang-push:periodic': { +# 'ietf-yang-push:period': 
period, +# } +# } +# } + + +CONTROLLER_MAP = { + 'e2e' : 'TFS-E2E', + 'agg' : 'TFS-AGG', + 'trans-pkt': 'TFS-IP', + 'trans-opt': 'NCE-T', + 'access' : 'NCE-FAN', + 'admin' : None, # controller-less +} + +def get_controller_id(network_id : str) -> Optional[str]: + # TODO: Future improvement: infer controller based on topology data + if network_id not in CONTROLLER_MAP: + MSG = 'Unable to identify controller for SimapNetwork({:s})' + raise Exception(MSG.format(str(network_id))) + return CONTROLLER_MAP[network_id] + + +@dataclass +class UnderlaySubscriptionId: + subscription_id : int + subscription_uri : str + + @classmethod + def from_reply(cls, sse_sub_rep : SSEMonitoringSubscriptionResponse) -> 'UnderlaySubscriptionId': + return cls( + subscription_id = sse_sub_rep.identifier, + subscription_uri = sse_sub_rep.uri, + ) + +def establish_underlay_subscription( + device_client : DeviceClient, controller_uuid : str, xpath_filter : str, + sampling_interval : float +) -> UnderlaySubscriptionId: + sse_sub_req = SSEMonitoringSubscriptionConfig() + sse_sub_req.device_id.device_uuid.uuid = controller_uuid + sse_sub_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe + sse_sub_req.uri = xpath_filter + sse_sub_req.sampling_interval = str(sampling_interval) + sse_sub_rep = device_client.SSETelemetrySubscribe(sse_sub_req) + return UnderlaySubscriptionId.from_reply(sse_sub_rep) + +def delete_underlay_subscription( + device_client : DeviceClient, controller_uuid : str, subscription_id : int +) -> None: + sse_unsub_req = SSEMonitoringSubscriptionConfig() + sse_unsub_req.device_id.device_uuid.uuid = controller_uuid + sse_unsub_req.config_type = SSEMonitoringSubscriptionConfig.Unsubscribe + sse_unsub_req.identifier = subscription_id + device_client.SSETelemetrySubscribe(sse_unsub_req) + + +KAFKA_BOOT_SERVERS = KafkaConfig.get_kafka_address() + +def create_kafka_topic(topic : str) -> None: + try: + kafka_admin = KafkaAdminClient(bootstrap_servers=KAFKA_BOOT_SERVERS) + existing_topics = set(kafka_admin.list_topics()) + if topic in existing_topics: return + to_create = [NewTopic(topic, num_partitions=3, replication_factor=1)] + kafka_admin.create_topics(to_create, validate_only=False) + except BrokerResponseError: + MSG = 'Error creating Topic({:s})' + LOGGER.exception(MSG.format(str(topic))) + +def delete_kafka_topic(topic : str) -> None: + try: + kafka_admin = KafkaAdminClient(bootstrap_servers=KAFKA_BOOT_SERVERS) + existing_topics = set(kafka_admin.list_topics()) + if topic not in existing_topics: return + kafka_admin.delete_topics([topic]) + except BrokerResponseError: + MSG = 'Error deleting Topic({:s})' + LOGGER.exception(MSG.format(str(topic))) diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 6e1741255..8fdc788c6 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -35,7 +35,7 @@ from .SimapConnectorService import SimapConnectorService TERMINATE = threading.Event() LOG_LEVEL = get_log_level() -logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +logging.basicConfig(level=LOG_LEVEL, format='[%(asctime)s] %(levelname)s:%(name)s:%(message)s') logging.getLogger('RestConfClient').setLevel(logging.WARN) LOGGER = logging.getLogger(__name__) @@ -75,16 +75,16 @@ def main(): rebuild_database(db_engine) - # Starting service - grpc_service = SimapConnectorService(db_engine) - grpc_service.start() - - restconf_client = RestConfClient( scheme=SIMAP_SERVER_SCHEME, 
address=SIMAP_SERVER_ADDRESS, port=SIMAP_SERVER_PORT, username=SIMAP_SERVER_USERNAME, password=SIMAP_SERVER_PASSWORD, ) + + # Starting service + grpc_service = SimapConnectorService(db_engine, restconf_client) + grpc_service.start() + simap_client = SimapClient(restconf_client) telemetry_pool = TelemetryPool(simap_client, terminate=TERMINATE) simap_updater = SimapUpdater(simap_client, telemetry_pool, TERMINATE) diff --git a/src/simap_connector/service/database/SubSubscription.py b/src/simap_connector/service/database/SubSubscription.py new file mode 100644 index 000000000..340fefbc2 --- /dev/null +++ b/src/simap_connector/service/database/SubSubscription.py @@ -0,0 +1,112 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime, logging +from sqlalchemy.dialects.postgresql import insert +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy_cockroachdb import run_transaction +from typing import Dict, List, Optional, Tuple +from common.method_wrappers.ServiceExceptions import NotFoundException +from .models.SubSubscriptionModel import SubSubscriptionModel + + +LOGGER = logging.getLogger(__name__) + + +def sub_subscription_list(db_engine : Engine, parent_subscription_uuid : str) -> List[Dict]: + def callback(session : Session) -> List[Dict]: + obj_list : List[SubSubscriptionModel] = ( + session + .query(SubSubscriptionModel) + .filter_by(parent=parent_subscription_uuid) + .all() + ) + return [obj.dump() for obj in obj_list] + return run_transaction(sessionmaker(bind=db_engine), callback) + + +def sub_subscription_get( + db_engine : Engine, parent_subscription_uuid : str, sub_subscription_id : int +) -> Dict: + def callback(session : Session) -> Optional[Dict]: + obj : Optional[SubSubscriptionModel] = ( + session + .query(SubSubscriptionModel) + .filter_by(parent=parent_subscription_uuid, sub_subscription_id=sub_subscription_id) + .one_or_none() + ) + return None if obj is None else obj.dump() + obj = run_transaction(sessionmaker(bind=db_engine), callback) + if obj is None: + sub_sub_key = '{:s}/{:s}'.format(str(parent_subscription_uuid), str(sub_subscription_id)) + raise NotFoundException('SubSubscription', sub_sub_key) + return obj + + +def sub_subscription_set( + db_engine : Engine, parent_subscription_uuid : str, controller_uuid : str, datastore : str, + xpath_filter : str, period : float, sub_subscription_id : int, sub_subscription_uri : str +) -> str: + now = datetime.datetime.now(datetime.timezone.utc) + sub_subscription_data = { + 'parent' : parent_subscription_uuid, + 'controller_uuid' : controller_uuid, + 'datastore' : datastore, + 'xpath_filter' : xpath_filter, + 'period' : period, + 'sub_subscription_id' : sub_subscription_id, + 'sub_subscription_uri': sub_subscription_uri, + 'created_at' : now, + 'updated_at' : now, + } + + def callback(session : Session) -> Tuple[bool, str]: + stmt = insert(SubSubscriptionModel).values([sub_subscription_data]) + 
stmt = stmt.on_conflict_do_update( + index_elements=[SubSubscriptionModel.subscription_uuid], + set_=dict( + controller_uuid = stmt.excluded.controller_uuid, + datastore = stmt.excluded.datastore, + xpath_filter = stmt.excluded.xpath_filter, + period = stmt.excluded.period, + sub_subscription_id = stmt.excluded.sub_subscription_id, + sub_subscription_uri = stmt.excluded.sub_subscription_uri, + updated_at = stmt.excluded.updated_at, + ) + ) + stmt = stmt.returning( + SubSubscriptionModel.created_at, SubSubscriptionModel.updated_at, + SubSubscriptionModel.sub_subscription_uuid + ) + return_values = session.execute(stmt).fetchone() + created_at,updated_at,subscription_uuid = return_values + return updated_at > created_at, subscription_uuid + + _, subscription_uuid = run_transaction(sessionmaker(bind=db_engine), callback) + return subscription_uuid + + +def sub_subscription_delete( + db_engine : Engine, parent_subscription_uuid : str, sub_subscription_id : int +) -> bool: + def callback(session : Session) -> bool: + num_deleted = ( + session + .query(SubSubscriptionModel) + .filter_by(parent=parent_subscription_uuid, sub_subscription_id=sub_subscription_id) + .delete() + ) + return num_deleted > 0 + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/simap_connector/service/database/Subscription.py b/src/simap_connector/service/database/Subscription.py index 2e6e83eb6..6081fc485 100644 --- a/src/simap_connector/service/database/Subscription.py +++ b/src/simap_connector/service/database/Subscription.py @@ -12,14 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import datetime, logging, uuid +import datetime, logging from sqlalchemy.dialects.postgresql import insert from sqlalchemy.engine import Engine from sqlalchemy.orm import Session, sessionmaker from sqlalchemy_cockroachdb import run_transaction from typing import Dict, Optional, Tuple -from common.proto.context_pb2 import Empty -from common.proto.simap_connector_pb2 import Subscription, SubscriptionId from common.method_wrappers.ServiceExceptions import NotFoundException from .models.SubscriptionModel import SubscriptionModel @@ -27,11 +25,7 @@ from .models.SubscriptionModel import SubscriptionModel LOGGER = logging.getLogger(__name__) -def get_random_uuid() -> str: - return str(uuid.uuid4()) - -def subscription_get(db_engine : Engine, request : SubscriptionId) -> Subscription: - subscription_id = request.subscription_id +def subscription_get(db_engine : Engine, subscription_id : int) -> Dict: def callback(session : Session) -> Optional[Dict]: obj : Optional[SubscriptionModel] = ( session @@ -43,48 +37,51 @@ def subscription_get(db_engine : Engine, request : SubscriptionId) -> Subscripti obj = run_transaction(sessionmaker(bind=db_engine), callback) if obj is None: raise NotFoundException('Subscription', str(subscription_id)) - return Subscription(**obj) + return obj -def subscription_set(db_engine : Engine, request : Subscription) -> SubscriptionId: - subscription_uuid = get_random_uuid() +def subscription_set( + db_engine : Engine, datastore : str, xpath_filter : str, period : float +) -> Tuple[str, int]: now = datetime.datetime.now(datetime.timezone.utc) + subscription_data = { + 'datastore' : datastore, + 'xpath_filter' : xpath_filter, + 'period' : period, + 'created_at' : now, + 'updated_at' : now, + } - subscription_data = [{ - 'subscription_uuid': subscription_uuid, - #'subscription_id' : subscription_id, - 'datastore' : request.datastore, - 
'filter_xpath' : request.filter_xpath, - 'sampling_interval': request.sampling_interval, - 'created_at' : now, - 'updated_at' : now, - }] - - def callback(session : Session) -> Tuple[bool, int]: - stmt = insert(SubscriptionModel).values(subscription_data) + def callback(session : Session) -> Tuple[bool, str, int]: + stmt = insert(SubscriptionModel).values([subscription_data]) stmt = stmt.on_conflict_do_update( index_elements=[SubscriptionModel.subscription_uuid], set_=dict( - datastore = stmt.excluded.datastore, - filter_xpath = stmt.excluded.filter_xpath, - sampling_interval = stmt.excluded.sampling_interval, - updated_at = stmt.excluded.updated_at, + datastore = stmt.excluded.datastore, + xpath_filter = stmt.excluded.xpath_filter, + period = stmt.excluded.period, + updated_at = stmt.excluded.updated_at, ) ) stmt = stmt.returning( SubscriptionModel.created_at, SubscriptionModel.updated_at, - SubscriptionModel.subscription_id + SubscriptionModel.subscription_uuid, SubscriptionModel.subscription_id ) - created_at,updated_at,subscription_id = session.execute(stmt).fetchone() - return updated_at > created_at, subscription_id + return_values = session.execute(stmt).fetchone() + created_at,updated_at,subscription_uuid,subscription_id = return_values + return updated_at > created_at, subscription_uuid, subscription_id - _,subscription_id = run_transaction(sessionmaker(bind=db_engine), callback) - return SubscriptionId(subscription_id=subscription_id) + _, subscription_uuid, subscription_id = run_transaction(sessionmaker(bind=db_engine), callback) + return subscription_uuid, subscription_id -def subscription_delete(db_engine : Engine, request : SubscriptionId) -> Empty: - subscription_id = request.subscription_id + +def subscription_delete(db_engine : Engine, subscription_id : int) -> bool: def callback(session : Session) -> bool: - num_deleted = session.query(SubscriptionModel).filter_by(subscription_id=subscription_id).delete() + num_deleted = ( + session + .query(SubscriptionModel) + .filter_by(subscription_id=subscription_id) + .delete() + ) return num_deleted > 0 - run_transaction(sessionmaker(bind=db_engine), callback) - return Empty() + return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/simap_connector/service/database/models/SubSubscriptionModel.py b/src/simap_connector/service/database/models/SubSubscriptionModel.py new file mode 100644 index 000000000..58e9a6d78 --- /dev/null +++ b/src/simap_connector/service/database/models/SubSubscriptionModel.py @@ -0,0 +1,48 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
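For readers unfamiliar with the CockroachDB upsert-and-return pattern used by subscription_set() and sub_subscription_set() above, the following standalone sketch shows the same idea in isolation. It is illustrative only and not part of the patch; ExampleRow and its columns are placeholders, not the real models.

import datetime
from sqlalchemy import Column, DateTime, String
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.orm import Session, declarative_base, sessionmaker
from sqlalchemy_cockroachdb import run_transaction

Base = declarative_base()

class ExampleRow(Base):
    __tablename__ = 'example_row'
    key        = Column(String, primary_key=True)
    value      = Column(String, nullable=False)
    created_at = Column(DateTime, nullable=False)
    updated_at = Column(DateTime, nullable=False)

def upsert_example(db_engine, key : str, value : str) -> bool:
    now = datetime.datetime.now(datetime.timezone.utc)
    row = {'key': key, 'value': value, 'created_at': now, 'updated_at': now}

    def callback(session : Session) -> bool:
        stmt = insert(ExampleRow).values([row])            # INSERT ...
        stmt = stmt.on_conflict_do_update(                 # ... ON CONFLICT DO UPDATE
            index_elements=[ExampleRow.key],
            set_=dict(value=stmt.excluded.value, updated_at=stmt.excluded.updated_at),
        )
        stmt = stmt.returning(ExampleRow.created_at, ExampleRow.updated_at)
        created_at, updated_at = session.execute(stmt).fetchone()
        return updated_at > created_at                     # True when an existing row was updated

    # run_transaction retries the callback on CockroachDB serialization errors
    return run_transaction(sessionmaker(bind=db_engine), callback)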
+ +from sqlalchemy import BigInteger, Column, DateTime, Float, ForeignKey, String, text +from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship +from typing import Dict +from ._Base import _Base + + +class SubSubscriptionModel(_Base): + __tablename__ = 'sub_subscription' + + parent = Column(ForeignKey('subscription.subscription_uuid', ondelete='CASCADE'), primary_key=True) + sub_subscription_uuid = Column(UUID(as_uuid=False), primary_key=True, server_default=text('gen_random_uuid()')) + controller_uuid = Column(String, nullable=False, unique=False) + datastore = Column(String, nullable=False, unique=False) + xpath_filter = Column(String, nullable=False, unique=False) + period = Column(Float, nullable=False, unique=False) + sub_subscription_id = Column(BigInteger, nullable=False, unique=False) + sub_subscription_uri = Column(String, nullable=False, unique=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) + + subscription = relationship('SubscriptionModel', back_populates='sub_subscriptions') + + def dump(self) -> Dict: + return { + 'parent_subscription_uuid' : self.parent, + 'sub_subscription_uuid' : self.sub_subscription_uuid, + 'controller_uuid' : self.controller_uuid, + 'datastore' : self.datastore, + 'xpath_filter' : self.xpath_filter, + 'period' : self.period, + 'sub_subscription_id' : self.sub_subscription_id, + 'sub_subscription_uri' : self.sub_subscription_uri, + } diff --git a/src/simap_connector/service/database/models/SubscriptionModel.py b/src/simap_connector/service/database/models/SubscriptionModel.py index 6a506149f..ad4ac5e78 100644 --- a/src/simap_connector/service/database/models/SubscriptionModel.py +++ b/src/simap_connector/service/database/models/SubscriptionModel.py @@ -13,8 +13,9 @@ # limitations under the License. 
from sqlalchemy import Column, DateTime, String -from sqlalchemy import Column, Float, Integer, String +from sqlalchemy import Column, Float, BigInteger, String, text from sqlalchemy.dialects.postgresql import UUID +from sqlalchemy.orm import relationship from typing import Dict from ._Base import _Base @@ -22,20 +23,21 @@ from ._Base import _Base class SubscriptionModel(_Base): __tablename__ = 'subscription' - subscription_uuid = Column(UUID(as_uuid=False), primary_key=True) - subscription_id = Column(Integer, nullable=False, unique=True, autoincrement=True) - datastore = Column(String, nullable=False, unique=False) - filter_xpath = Column(String, nullable=False, unique=False) - sampling_interval = Column(Float, nullable=False, unique=False) - created_at = Column(DateTime, nullable=False) - updated_at = Column(DateTime, nullable=False) + subscription_uuid = Column(UUID(as_uuid=False), primary_key=True, server_default=text('gen_random_uuid()')) + subscription_id = Column(BigInteger, nullable=False, unique=True, server_default=text('unique_rowid()')) + datastore = Column(String, nullable=False, unique=False) + xpath_filter = Column(String, nullable=False, unique=False) + period = Column(Float, nullable=False, unique=False) + created_at = Column(DateTime, nullable=False) + updated_at = Column(DateTime, nullable=False) - def dump_id(self) -> Dict: - return {'subscription_uuid': {'uuid': self.subscription_uuid}} + sub_subscriptions = relationship('SubSubscriptionModel') def dump(self) -> Dict: return { + 'subscription_uuid': self.subscription_uuid, + 'subscription_id' : self.subscription_id, 'datastore' : self.datastore, - 'filter_xpath' : self.filter_xpath, - 'sampling_interval': self.sampling_interval, + 'xpath_filter' : self.xpath_filter, + 'period' : self.period, } diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 6c2c9ea8d..112108c8a 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -25,7 +25,7 @@ from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap -from simap_connector.service.telemetry.Resources import ResourceLink, Resources, SyntheticSampler +from simap_connector.service.telemetry.worker.data.Resources import ResourceLink, Resources, SyntheticSampler from simap_connector.service.telemetry.TelemetryPool import TelemetryPool from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient @@ -357,7 +357,7 @@ class EventDispatcher(BaseEventDispatcher): related_service_ids=[], )) sampling_interval = 1.0 - self._telemetry_pool.start_worker(worker_name, resources, sampling_interval) + self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) return True @@ -425,7 +425,7 @@ class EventDispatcher(BaseEventDispatcher): self._object_cache.delete(CachedEntities.LINK, link_name) worker_name = '{:s}:{:s}'.format(topology_name, link_name) - self._telemetry_pool.stop_worker(worker_name) + self._telemetry_pool.stop_synthesizer(worker_name) MSG = 'Link Removed: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -531,7 +531,7 @@ class EventDispatcher(BaseEventDispatcher): resources = Resources() sampling_interval = 1.0 - 
self._telemetry_pool.start_worker(domain_name, resources, sampling_interval) + self._telemetry_pool.start_synthesizer(domain_name, resources, sampling_interval) return True @@ -627,7 +627,7 @@ class EventDispatcher(BaseEventDispatcher): #self._object_cache.delete(CachedEntities.SERVICE, service_uuid) #self._object_cache.delete(CachedEntities.SERVICE, service_name) - self._telemetry_pool.stop_worker(domain_name) + self._telemetry_pool.stop_synthesizer(domain_name) MSG = 'Logical Link Removed for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index c8ee4247b..50c2366ad 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -14,73 +14,131 @@ import logging, threading +from queue import Queue from typing import Dict, Optional from simap_connector.service.simap_updater.SimapClient import SimapClient -from .Resources import Resources -from .TelemetryWorker import TelemetryWorker +from .worker.data.Resources import Resources +from .worker.data.Sample import Sample +from .worker._Worker import _Worker, WorkerTypeEnum, get_worker_key +from .worker.AggregatorWorker import AggregatorWorker +from .worker.CollectorWorker import CollectorWorker +from .worker.SynthesizerWorker import SynthesizerWorker LOGGER = logging.getLogger(__name__) +WORKER_CLASSES = { + WorkerTypeEnum.AGGREGATOR : AggregatorWorker, + WorkerTypeEnum.COLLECTOR : CollectorWorker, + WorkerTypeEnum.SYNTHESIZER: SynthesizerWorker, +} + + class TelemetryPool: def __init__( self, simap_client : SimapClient, terminate : Optional[threading.Event] = None ) -> None: self._simap_client = simap_client - self._workers : Dict[str, TelemetryWorker] = dict() + self._workers : Dict[str, _Worker] = dict() self._lock = threading.Lock() self._terminate = threading.Event() if terminate is None else terminate - def has_worker(self, worker_name : str) -> bool: + + def has_worker(self, worker_type : WorkerTypeEnum, worker_name : str) -> bool: + worker_key = get_worker_key(worker_type, worker_name) + return self.has_worker_by_key(worker_key) + + + def has_worker_by_key(self, worker_key : str) -> bool: with self._lock: - return worker_name in self._workers + return worker_key in self._workers + + + def start_aggregator( + self, worker_name : str, parent_subscription_id : int, aggregation_cache : AggregationCache, + topic : str, sampling_interval : float + ) -> None: + self._start_worker( + WorkerTypeEnum.AGGREGATOR, worker_name, parent_subscription_id, aggregation_cache, + topic, sampling_interval + ) + - def start_worker( + def start_collector( + self, worker_name : str, subscription_id : int, controller_uuid : Optional[str], + subscription_uri : str, aggregation_cache : AggregationCache, sampling_interval : float + ) -> None: + self._start_worker( + WorkerTypeEnum.COLLECTOR, worker_name, subscription_id, controller_uuid, + subscription_uri, aggregation_cache, sampling_interval + ) + + + def start_synthesizer( self, worker_name : str, resources : Resources, sampling_interval : float ) -> None: + self._start_worker( + WorkerTypeEnum.SYNTHESIZER, worker_name, self._simap_client, resources, + sampling_interval + ) + + + def _start_worker( + self, worker_type : WorkerTypeEnum, worker_name : str, *args, **kwargs + ) -> None: + worker_key = get_worker_key(worker_type, worker_name) with self._lock: - if worker_name in 
self._workers: + if worker_key in self._workers: MSG = '[start_worker] Worker({:s}) already exists' - LOGGER.debug(MSG.format(str(worker_name))) + LOGGER.debug(MSG.format(str(worker_key))) return - worker = TelemetryWorker( - worker_name, self._simap_client, resources, sampling_interval, - terminate=self._terminate + worker_class = WORKER_CLASSES.get(worker_type) + if worker_class is None: + MSG = 'Unsupported WorkerType({:s})' + raise Exception(MSG.format(str(worker_type))) + + worker : _Worker = worker_class( + worker_name, *args, terminate=self._terminate, **kwargs ) worker.start() MSG = '[start_worker] Started Worker({:s})' - LOGGER.info(MSG.format(str(worker_name))) + LOGGER.info(MSG.format(str(worker_key))) + + self._workers[worker_key] = worker + - self._workers[worker_name] = worker + def stop_worker(self, worker_type : WorkerTypeEnum, worker_name : str) -> None: + worker_key = get_worker_key(worker_type, worker_name) + self.stop_worker_by_key(worker_key) - def stop_worker(self, worker_name : str) -> None: + def stop_worker_by_key(self, worker_key : str) -> None: with self._lock: - worker = self._workers.pop(worker_name, None) + worker = self._workers.pop(worker_key, None) if worker is None: MSG = '[stop_worker] Worker({:s}) not found' - LOGGER.debug(MSG.format(str(worker_name))) + LOGGER.debug(MSG.format(str(worker_key))) return worker.stop() MSG = '[stop_worker] Stopped Worker({:s})' - LOGGER.info(MSG.format(str(worker_name))) + LOGGER.info(MSG.format(str(worker_key))) def stop_all(self) -> None: - LOGGER.info('[stop_all] Stopping all worker') + LOGGER.info('[stop_all] Stopping all workers') with self._lock: - names = list(self._workers.keys()) + worker_keys = list(self._workers.keys()) - for name in names: + for worker_key in worker_keys: try: - self.stop_worker(name) + self.stop_worker_by_key(worker_key) except Exception: MSG = '[stop_all] Unhandled Exception stopping Worker({:s})' - LOGGER.exception(MSG.format(str(name))) + LOGGER.exception(MSG.format(str(worker_key))) diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py new file mode 100644 index 000000000..acc978001 --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -0,0 +1,112 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
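The reworked TelemetryPool above is driven from the gRPC servicer and the SIMAP updater. A minimal usage sketch follows; it is not part of the patch, the identifiers and numeric values are placeholders, and simap_client is assumed to be an already-constructed SimapClient instance.

from simap_connector.service.telemetry.TelemetryPool import TelemetryPool
from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum
from simap_connector.service.telemetry.worker.data.AggregationCache import AggregationCache

pool  = TelemetryPool(simap_client)   # simap_client: assumed SimapClient instance
cache = AggregationCache()            # shared by the collectors and the aggregator

# one collector per underlay subscription, all feeding the shared cache
pool.start_collector('NCE-T:17', 17, 'NCE-T', '/restconf/stream/17', cache, 1.0)

# one aggregator per parent subscription, publishing SSE frames to its Kafka topic
pool.start_aggregator('NCE-T:42', 42, cache, 'subscription.42', 1.0)

# workers are addressed by (type, name); get_worker_key() yields e.g. 'collector-NCE-T:17'
assert pool.has_worker(WorkerTypeEnum.COLLECTOR, 'NCE-T:17')

pool.stop_worker(WorkerTypeEnum.AGGREGATOR, 'NCE-T:42')
pool.stop_all()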
+ + +import json, math, threading, time +from dataclasses import dataclass +from kafka import KafkaProducer +from typing import Dict, Optional, Union +from common.tools.kafka.Variables import KafkaConfig +from .data.AggregationCache import AggregationCache +from ._Worker import _Worker, WorkerTypeEnum + + +KAFKA_BOOT_SERVERS = KafkaConfig.get_kafka_address() + +WAIT_LOOP_GRANULARITY = 0.5 + + +@dataclass +class ServerSentEvent: + event_data : Union[str, Dict] + event_type : Optional[str] = None + event_id : Optional[int] = None + + def format(self) -> str: + # SSE specs + event_data = self.event_data + if not isinstance(event_data, str): + event_data = json.dumps(event_data) + + lines = [ + 'data: {:s}'.format(line) + for line in event_data.splitlines() + ] + + if self.event_type: + lines.insert(0, 'event: {:s}'.format(str(self.event_type))) + + if self.event_id: + lines.append('id: {:d}'.format(int(self.event_id))) + + return '\n'.join(lines) + '\n\n' + + +class AggregatorWorker(_Worker): + def __init__( + self, worker_name : str, parent_subscription_id : int, aggregation_cache : AggregationCache, + topic : str, sampling_interval : float, terminate : Optional[threading.Event] = None + ) -> None: + super().__init__(WorkerTypeEnum.AGGREGATOR, worker_name, terminate=terminate) + self._parent_subscription_id = parent_subscription_id + self._aggregation_cache = aggregation_cache + self._topic = topic + self._sampling_interval = sampling_interval + + + def run(self) -> None: + self._logger.info('[run] Starting...') + + kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BOOT_SERVERS) + update_counter = 1 + + try: + while not self._stop_event.is_set() and not self._terminate.is_set(): + self._logger.info('[run] Aggregating...') + + link_sample = self._aggregation_cache.aggregate() + + data = {'notification': { + 'eventTime': link_sample.timestamp.isoformat() + 'Z', + 'push-update': { + 'id': update_counter, + 'datastore-contents': {'simap-telemetry:simap-telemetry': { + 'bandwidth-utilization': '{:.2f}'.format(link_sample.bandwidth_utilization), + 'latency' : '{:.3f}'.format(link_sample.latency), + 'related-service-ids' : list(link_sample.related_service_ids), + }} + } + }} + + event = ServerSentEvent( + event_data=data, event_id=update_counter, event_type='push-update' + ) + str_event = event.format() + + kafka_producer.send( + self._topic, key=self._topic.encode('utf-8'), + value=str_event.encode('utf-8') + ) + kafka_producer.flush() + + # Make wait responsible to terminations + iterations = int(math.ceil(self._sampling_interval / WAIT_LOOP_GRANULARITY)) + for _ in range(iterations): + if self._stop_event.is_set(): break + if self._terminate.is_set() : break + time.sleep(WAIT_LOOP_GRANULARITY) + except Exception: + self._logger.exception('[run] Unhandled Exception') + finally: + self._logger.info('[run] Terminated') diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py new file mode 100644 index 000000000..6bbb4b47e --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -0,0 +1,120 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json, requests, threading +from requests.exceptions import ReadTimeout +from typing import Optional +from .data.AggregationCache import AggregationCache, LinkSample +from ._Worker import _Worker, WorkerTypeEnum + + +CONTROLLER_TO_ADDRESS_PORT = { + 'TFS-E2E' : ('10.254.0.10', 80), + 'TFS-AGG' : ('10.254.0.11', 80), + 'TFS-IP' : ('10.254.0.12', 80), + 'NCE-T' : ('10.254.0.9', 80), + 'NCE-FAN' : ('10.254.0.9', 80), + 'SIMAP' : ('10.254.0.9', 80), +} + + +class CollectorWorker(_Worker): + def __init__( + self, worker_name : str, subscription_id : int, controller_uuid : Optional[str], + subscription_uri : str, aggregation_cache : AggregationCache, sampling_interval : float, + terminate : Optional[threading.Event] = None + ) -> None: + super().__init__(WorkerTypeEnum.COLLECTOR, worker_name, terminate=terminate) + self._subscription_id = subscription_id + self._controller_uuid = controller_uuid + self._subscription_uri = subscription_uri + self._aggregation_cache = aggregation_cache + self._sampling_interval = sampling_interval + + def run(self) -> None: + self._logger.info('[run] Starting...') + + try: + address_port = CONTROLLER_TO_ADDRESS_PORT.get(self._controller_uuid) + if address_port is None: address_port = CONTROLLER_TO_ADDRESS_PORT['SIMAP'] + address, port = address_port + stream_url = 'http://{:s}:{:d}{:s}'.format(address, port, self._subscription_uri) + self._logger.info('[run] Opening stream "{:s}"...'.format(str(stream_url))) + + session = requests.Session() + try: + # NOTE: Trick: we set 1-second read_timeout to force the loop to give control + # back and be able to check termination events. 
+ with session.get(stream_url, stream=True, timeout=(10, 1)) as reply: + reply.raise_for_status() + + it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) + + while not self._stop_event.is_set() and not self._terminate.is_set(): + try: + line = next(it_lines) # may block until read_timeout + except StopIteration: + break # server closed + except ReadTimeout: + continue # no data this tick; loop to check termination conditions + + if line is None: continue + if len(line) == 0: continue + + self._logger.info('[run] ==> {:s}'.format(str(line))) + if not line.startswith('data:'): continue + data = json.loads(data[5:]) + + if 'notification' not in data: + MSG = 'Field(notification) missing: {:s}' + raise Exception(MSG.format(str(data))) + notification = data['notification'] + + if 'push-update' not in notification: + MSG = 'Field(notification/push-update) missing: {:s}' + raise Exception(MSG.format(str(data))) + push_update = notification['push-update'] + + if 'datastore-contents' not in push_update: + MSG = 'Field(notification/push-update/datastore-contents) missing: {:s}' + raise Exception(MSG.format(str(data))) + datastore_contents = push_update['datastore-contents'] + + if 'simap-telemetry:simap-telemetry' not in datastore_contents: + MSG = ( + 'Field(notification/push-update/datastore-contents' + '/simap-telemetry:simap-telemetry) missing: {:s}' + ) + raise Exception(MSG.format(str(data))) + simap_telemetry = datastore_contents['simap-telemetry:simap-telemetry'] + + bandwidth_utilization = float(simap_telemetry['bandwidth-utilization']) + latency = float(simap_telemetry['latency']) + related_service_ids = simap_telemetry['related-service-ids'] + + link_sample = LinkSample( + subscription_id = self._subscription_id, + bandwidth_utilization = bandwidth_utilization, + latency = latency, + related_service_ids = related_service_ids, + ) + self._aggregation_cache.update(link_sample) + finally: + if session is not None: + session.close() + except Exception: + self._logger.exception('[run] Unhandled Exception') + finally: + self._logger.info('[run] Terminated') diff --git a/src/simap_connector/service/telemetry/TelemetryWorker.py b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py similarity index 61% rename from src/simap_connector/service/telemetry/TelemetryWorker.py rename to src/simap_connector/service/telemetry/worker/SynthesizerWorker.py index cd590ad19..575f4c9bf 100644 --- a/src/simap_connector/service/telemetry/TelemetryWorker.py +++ b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py @@ -13,46 +13,32 @@ # limitations under the License. 
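Tying the two workers above together: an AggregatorWorker publishes frames like the one below to Kafka, and a CollectorWorker consumes them line by line over a long-lived HTTP stream. A self-contained round-trip sketch, not part of the patch; note that the consumer hunk above assigns data = json.loads(data[5:]), where the intent is presumably to decode line[5:].

import json

# what ServerSentEvent.format() emits for a small payload
frame = (
    'event: push-update\n'
    'data: {"notification": {"push-update": {"id": 1}}}\n'
    'id: 1\n'
    '\n'
)

# what the collector loop is meant to do with each received line
for line in frame.splitlines():
    if not line.startswith('data:'): continue
    payload = json.loads(line[5:])                          # 'data:' prefix is 5 characters
    print(payload['notification']['push-update']['id'])     # -> 1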
-import logging, math, threading, time +import math, threading, time from typing import Optional from simap_connector.service.simap_updater.SimapClient import SimapClient -from .Resources import Resources - - -LOGGER = logging.getLogger(__name__) +from .data.Resources import Resources +from ._Worker import _Worker, WorkerTypeEnum WAIT_LOOP_GRANULARITY = 0.5 -class TelemetryWorker(threading.Thread): +class SynthesizerWorker(_Worker): def __init__( self, worker_name : str, simap_client : SimapClient, resources : Resources, sampling_interval : float, terminate : Optional[threading.Event] = None ) -> None: - name = 'TelemetryWorker({:s})'.format(str(worker_name)) - super().__init__(name=name, daemon=True) - self._worker_name = worker_name + super().__init__(WorkerTypeEnum.SYNTHESIZER, worker_name, terminate=terminate) self._simap_client = simap_client self._resources = resources self._sampling_interval = sampling_interval - self._stop_event = threading.Event() - self._terminate = threading.Event() if terminate is None else terminate - - def stop(self) -> None: - MSG = '[stop][{:s}] Stopping...' - LOGGER.info(MSG.format(str(self._worker_name))) - self._stop_event.set() - self.join() def run(self) -> None: - MSG = '[run][{:s}] Starting...' - LOGGER.info(MSG.format(str(self._worker_name))) + self._logger.info('[run] Starting...') try: while not self._stop_event.is_set() and not self._terminate.is_set(): - MSG = '[run][{:s}] Sampling...' - LOGGER.info(MSG.format(str(self._worker_name))) + self._logger.info('[run] Sampling...') self._resources.generate_samples(self._simap_client) @@ -64,8 +50,6 @@ class TelemetryWorker(threading.Thread): time.sleep(WAIT_LOOP_GRANULARITY) except Exception: - MSG = '[run][{:s}] Unhandled Exception' - LOGGER.exception(MSG.format(str(self._worker_name))) + self._logger.exception('[run] Unhandled Exception') finally: - MSG = '[run][{:s}] Terminated' - LOGGER.info(MSG.format(str(self._worker_name))) + self._logger.info('[run] Terminated') diff --git a/src/simap_connector/service/telemetry/worker/_Worker.py b/src/simap_connector/service/telemetry/worker/_Worker.py new file mode 100644 index 000000000..caec6b9fa --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/_Worker.py @@ -0,0 +1,57 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import logging, threading +from enum import Enum +from typing import Optional + + +class WorkerTypeEnum(Enum): + AGGREGATOR = 'aggregator' + COLLECTOR = 'collector' + SYNTHESIZER = 'synthesizer' + + +def get_worker_key(worker_type : WorkerTypeEnum, worker_name : str) -> str: + return '{:s}-{:s}'.format(worker_type.value, worker_name) + + +class _Worker(threading.Thread): + def __init__( + self, worker_type : WorkerTypeEnum, worker_name : str, + terminate : Optional[threading.Event] = None + ) -> None: + self._worker_type = worker_type + self._worker_name = worker_name + self._worker_key = get_worker_key(worker_type, worker_name) + name = 'TelemetryWorker({:s})'.format(self._worker_name) + super().__init__(name=name, daemon=True) + self._logger = logging.getLogger(name) + self._stop_event = threading.Event() + self._terminate = threading.Event() if terminate is None else terminate + + @property + def worker_type(self) -> WorkerTypeEnum: return self._worker_type + + @property + def worker_name(self) -> str: return self._worker_name + + @property + def worker_key(self) -> str: return self._worker_key + + def stop(self) -> None: + self._logger.info('[stop] Stopping...') + self._stop_event.set() + self.join() diff --git a/src/simap_connector/service/telemetry/worker/__init__.py b/src/simap_connector/service/telemetry/worker/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py new file mode 100644 index 000000000..a5f811818 --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -0,0 +1,61 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
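The _Worker base class above centralizes naming, logging and the stop/terminate events, so concrete workers only override run(). A minimal subclass sketch, not part of the patch; NoOpWorker is a placeholder and reuses an existing WorkerTypeEnum value purely for illustration.

import time
from simap_connector.service.telemetry.worker._Worker import _Worker, WorkerTypeEnum

class NoOpWorker(_Worker):
    def __init__(self, worker_name, terminate=None):
        super().__init__(WorkerTypeEnum.SYNTHESIZER, worker_name, terminate=terminate)

    def run(self) -> None:
        self._logger.info('[run] Starting...')
        try:
            # loop until either the pool stops this worker or the service terminates
            while not self._stop_event.is_set() and not self._terminate.is_set():
                time.sleep(0.5)   # a real worker samples / collects / aggregates here
        finally:
            self._logger.info('[run] Terminated')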
+ + +import threading +from dataclasses import dataclass, field +from datetime import datetime +from typing import Dict, Set, Tuple + + +@dataclass +class LinkSample: + subscription_id : int + bandwidth_utilization : float + latency : float + related_service_ids : Set[str] = field(default_factory=set) + + +@dataclass +class AggregatedLinkSample: + timestamp : datetime + bandwidth_utilization : float = field(default=0.0) + latency : float = field(default=0.0) + related_service_ids : Set[str] = field(default_factory=set) + + +class AggregationCache: + def __init__(self) -> None: + self._lock = threading.Lock() + self._samples : Dict[Tuple[str, str], LinkSample] = dict() + + + def update(self, link_sample : LinkSample) -> None: + link_key = (link_sample.domain_name, link_sample.link_name) + with self._lock: + self._samples[link_key] = link_sample + + + def aggregate(self) -> AggregatedLinkSample: + with self._lock: + agg = AggregatedLinkSample(timestamp=datetime.utcnow()) + for sample in self._samples.values(): + agg.bandwidth_utilization = max( + agg.bandwidth_utilization, sample.bandwidth_utilization + ) + agg.latency = agg.latency + sample.latency + agg.related_service_ids = agg.related_service_ids.union( + sample.related_service_ids + ) + return agg diff --git a/src/simap_connector/service/telemetry/Resources.py b/src/simap_connector/service/telemetry/worker/data/Resources.py similarity index 96% rename from src/simap_connector/service/telemetry/Resources.py rename to src/simap_connector/service/telemetry/worker/data/Resources.py index 4e81cc4fe..49c16c340 100644 --- a/src/simap_connector/service/telemetry/Resources.py +++ b/src/simap_connector/service/telemetry/worker/data/Resources.py @@ -16,7 +16,7 @@ from dataclasses import dataclass, field from typing import List from simap_connector.service.simap_updater.SimapClient import SimapClient -from simap_connector.service.telemetry.SyntheticSamplers import SyntheticSampler +from .SyntheticSamplers import SyntheticSampler @dataclass @@ -33,6 +33,7 @@ class ResourceNode: cpu_utilization.value, related_service_ids=self.related_service_ids ) + @dataclass class ResourceLink: domain_name : str diff --git a/src/simap_connector/service/telemetry/worker/data/Sample.py b/src/simap_connector/service/telemetry/worker/data/Sample.py new file mode 100644 index 000000000..f9bfb0614 --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/data/Sample.py @@ -0,0 +1,23 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
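One detail worth flagging in AggregationCache above: update() builds its key from link_sample.domain_name and link_sample.link_name, but the LinkSample dataclass only carries subscription_id plus the metric fields. A variant consistent with the dataclass as defined (sketch only, not part of the patch) would be:

    def update(self, link_sample : LinkSample) -> None:
        # key by the underlay subscription that produced the sample
        with self._lock:
            self._samples[link_sample.subscription_id] = link_sample

with self._samples then annotated as Dict[int, LinkSample].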
+ + +from dataclasses import dataclass + + +@dataclass +class Sample: + timestamp : float + subscription_id : int + value : float diff --git a/src/simap_connector/service/telemetry/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py similarity index 96% rename from src/simap_connector/service/telemetry/SyntheticSamplers.py rename to src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index c80d03694..2f33b32d1 100644 --- a/src/simap_connector/service/telemetry/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -17,12 +17,7 @@ import math, random, threading from dataclasses import dataclass from datetime import datetime from typing import Dict - - -@dataclass -class Sample: - timestamp : float - value : float +from .Sample import Sample @dataclass @@ -54,7 +49,7 @@ class SyntheticSampler: noise = self.amplitude * random.random() value = abs((1.0 - self.noise_ratio) * waveform + self.noise_ratio * noise) - return Sample(timestamp, value) + return Sample(timestamp, 0, value) class SyntheticSamplers: diff --git a/src/simap_connector/service/telemetry/worker/data/__init__.py b/src/simap_connector/service/telemetry/worker/data/__init__.py new file mode 100644 index 000000000..7363515f0 --- /dev/null +++ b/src/simap_connector/service/telemetry/worker/data/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -- GitLab From ce595b49bc27d6810152dfd757fb32b1186dcc6a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 12:01:14 +0000 Subject: [PATCH 290/367] ECOC F5GA Telemetry Demo: - Updated subscribe telemetry script to use long-term GET requests --- .../subscribe_telemetry_slice1.py | 43 ++++++++++--------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py b/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py index b4f87873e..86ee09dab 100644 --- a/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py +++ b/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py @@ -13,10 +13,11 @@ # limitations under the License. 
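+# Subscribes to E2E slice1 telemetry over RESTCONF (HTTP POST with basic auth)
+# and then follows the returned subscription URI with a long-lived streaming
+# GET, printing every line received until interrupted.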
-import requests, websocket +import requests +from requests.auth import HTTPBasicAuth -RESTCONF_ADDRESS = '0.0.0.0' +RESTCONF_ADDRESS = '127.0.0.1' RESTCONF_PORT = 80 TARGET_SIMAP_NAME = 'e2e' TARGET_LINK_NAME = 'E2E-L1' @@ -37,23 +38,24 @@ REQUEST = { } -def on_open(ws): - print('### Opened stream ###') - -def on_message(ws, message): - print(message) - -def on_error(ws, error): - print(error) - -def on_close(ws, close_status_code, close_msg): - print('### Closed stream ###') - def main() -> None: print('[E2E] Subscribe Telemetry slice1...') - reply = requests.get(SUBSCRIBE_URL, json=REQUEST, allow_redirects=True) - assert reply.is_json - reply_data = reply.json() + headers = {'accept': 'application/json'} + auth = HTTPBasicAuth('admin', 'admin') + print(SUBSCRIBE_URL) + print(REQUEST) + reply = requests.post( + SUBSCRIBE_URL, headers=headers, json=REQUEST, auth=auth, + verify=False, allow_redirects=True, timeout=30 + ) + content_type = reply.headers.get('Content-Type', '') + if 'application/json' not in content_type: + raise Exception('Not JSON:', reply.content.decode('UTF-8')) + try: + reply_data = reply.json() + except ValueError as e: + str_error = 'Invalid JSON: {:s}'.format(str(reply.content.decode('UTF-8'))) + raise Exception(str_error) from e if 'uri' not in reply_data: raise Exception('Unexpected Reply: {:s}'.format(str(reply_data))) @@ -62,10 +64,9 @@ def main() -> None: stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri) print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) - ws = websocket.WebSocketApp( - stream_url, on_open=on_open, on_message=on_message, on_error=on_error, on_close=on_close - ) - ws.run_forever() + with requests.get(stream_url, stream=True) as resp: + for line in resp.iter_lines(decode_unicode=True): + print(line) if __name__ == '__main__': main() -- GitLab From 842e66842c62d07e75c72c0749301abf01f8b5b4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:05:36 +0000 Subject: [PATCH 291/367] Simap Connector: - Fix imports --- src/simap_connector/service/telemetry/TelemetryPool.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index 50c2366ad..712177236 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -14,11 +14,10 @@ import logging, threading -from queue import Queue from typing import Dict, Optional from simap_connector.service.simap_updater.SimapClient import SimapClient from .worker.data.Resources import Resources -from .worker.data.Sample import Sample +from .worker.data.AggregationCache import AggregationCache from .worker._Worker import _Worker, WorkerTypeEnum, get_worker_key from .worker.AggregatorWorker import AggregatorWorker from .worker.CollectorWorker import CollectorWorker -- GitLab From 06d5947d0a98271661e305efdf83ae56b144cb87 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:09:06 +0000 Subject: [PATCH 292/367] Simap Connector: - Fix Dockerfile --- src/simap_connector/Dockerfile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/simap_connector/Dockerfile b/src/simap_connector/Dockerfile index faaaf9947..35dd3abfb 100644 --- a/src/simap_connector/Dockerfile +++ b/src/simap_connector/Dockerfile @@ -54,6 +54,8 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow COPY src/context/__init__.py context/__init__.py COPY 
src/context/client/. context/client/ +COPY src/device/__init__.py device/__init__.py +COPY src/device/client/. device/client/ COPY src/simap_connector/. simap_connector/ # Start the service -- GitLab From 39ffb3da081eab6478f373e89b2cc9ff848d5aaf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:14:56 +0000 Subject: [PATCH 293/367] Simap Connector: - Fix parameters for SimapConnectorService --- src/simap_connector/service/__main__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 8fdc788c6..e52768f8e 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -81,12 +81,12 @@ def main(): password=SIMAP_SERVER_PASSWORD, ) - # Starting service - grpc_service = SimapConnectorService(db_engine, restconf_client) - grpc_service.start() - simap_client = SimapClient(restconf_client) telemetry_pool = TelemetryPool(simap_client, terminate=TERMINATE) + + grpc_service = SimapConnectorService(db_engine, restconf_client, telemetry_pool) + grpc_service.start() + simap_updater = SimapUpdater(simap_client, telemetry_pool, TERMINATE) simap_updater.start() -- GitLab From 6098db5052b0aab90176aecaaf9efcec84550e9c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:22:58 +0000 Subject: [PATCH 294/367] Simap Connector: - Set debug level --- manifests/simap_connectorservice.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index a061e1f7f..90d55409c 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: SIMAP_SERVER_SCHEME value: "http" - name: SIMAP_SERVER_ADDRESS -- GitLab From 08244e95ddf7002005edb6db6784d7c9421b173a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:24:02 +0000 Subject: [PATCH 295/367] NBI component: - Increased workers to 8 --- src/nbi/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile index f4957f34a..05e6f861a 100644 --- a/src/nbi/Dockerfile +++ b/src/nbi/Dockerfile @@ -94,4 +94,4 @@ COPY src/tests/tools/mock_osm/. tests/tools/mock_osm/ # Start the service # NOTE: Configured single worker to prevent issues with multi-worker synchronization. To be invetsigated. 
-ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nbi.service.app:app"] +ENTRYPOINT ["gunicorn", "--workers", "8", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nbi.service.app:app"] -- GitLab From 1dca476dd9c0073b9c84421e168f01f6dc83a64e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:31:02 +0000 Subject: [PATCH 296/367] Simap Connector: - Fix logging message format --- src/simap_connector/service/telemetry/worker/_Worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/telemetry/worker/_Worker.py b/src/simap_connector/service/telemetry/worker/_Worker.py index caec6b9fa..ae0da4fc7 100644 --- a/src/simap_connector/service/telemetry/worker/_Worker.py +++ b/src/simap_connector/service/telemetry/worker/_Worker.py @@ -36,7 +36,7 @@ class _Worker(threading.Thread): self._worker_type = worker_type self._worker_name = worker_name self._worker_key = get_worker_key(worker_type, worker_name) - name = 'TelemetryWorker({:s})'.format(self._worker_name) + name = 'TelemetryWorker({:s})'.format(self._worker_key) super().__init__(name=name, daemon=True) self._logger = logging.getLogger(name) self._stop_event = threading.Event() -- GitLab From 6612e410f3bc2f3c616182dc751fa4502609af5a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:50:30 +0000 Subject: [PATCH 297/367] Simap Connector: - Added min/max values for synthetic sample generators --- .../service/simap_updater/SimapUpdater.py | 3 ++ .../worker/data/SyntheticSamplers.py | 28 ++++++++++++------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 112108c8a..014ea29ca 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -346,6 +346,8 @@ class EventDispatcher(BaseEventDispatcher): period_scale = 86_400, offset_scale = 50, noise_ratio = 0.05, + min_value = 0.0, + max_value = 100.0, ), latency_sampler=SyntheticSampler.create_random( amplitude_scale = 0.5, @@ -353,6 +355,7 @@ class EventDispatcher(BaseEventDispatcher): period_scale = 60.0, offset_scale = 10.0, noise_ratio = 0.05, + min_value = 0.0, ), related_service_ids=[], )) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 2f33b32d1..b94297128 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -13,31 +13,36 @@ # limitations under the License. 
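+# Synthetic samplers generate pseudo-random periodic telemetry values with
+# configurable amplitude, phase, period, offset and noise; generated values
+# are clamped to the optional min_value/max_value bounds.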
-import math, random, threading -from dataclasses import dataclass +import math, random, sys, threading +from dataclasses import dataclass, field from datetime import datetime -from typing import Dict +from typing import Dict, Optional from .Sample import Sample @dataclass class SyntheticSampler: - amplitude : float = 0.0 - phase : float = 0.0 - period : float = 1.0 - offset : float = 0.0 - noise_ratio : float = 0.0 + amplitude : float = field(default=0.0) + phase : float = field(default=0.0) + period : float = field(default=1.0) + offset : float = field(default=0.0) + noise_ratio : float = field(default=0.0) + min_value : float = field(default=-sys.float_info.max) + max_value : float = field(default=sys.float_info.max) @classmethod def create_random( cls, amplitude_scale : float, phase_scale : float, period_scale : float, - offset_scale : float, noise_ratio : float + offset_scale : float, noise_ratio : float, + min_value : Optional[float] = None, max_value : Optional[float] = None ) -> 'SyntheticSampler': amplitude = amplitude_scale * random.random() phase = phase_scale * random.random() period = period_scale * random.random() offset = offset_scale * random.random() + amplitude - return cls(amplitude, phase, period, offset, noise_ratio) + if min_value is None: min_value = -sys.float_info.max + if max_value is None: max_value = sys.float_info.max + return cls(amplitude, phase, period, offset, noise_ratio, min_value, max_value) def get_sample(self) -> Sample: timestamp = datetime.timestamp(datetime.utcnow()) @@ -49,6 +54,9 @@ class SyntheticSampler: noise = self.amplitude * random.random() value = abs((1.0 - self.noise_ratio) * waveform + self.noise_ratio * noise) + value = max(value, self.min_value) + value = min(value, self.max_value) + return Sample(timestamp, 0, value) -- GitLab From b418698c1d957b6ff3f3acd1fc55ca3e289883f2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 13:50:59 +0000 Subject: [PATCH 298/367] Common - Tools - RestConf - Client: - Corrected RestConf version forcing --- src/common/tools/rest_conf/client/RestConfClient.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/common/tools/rest_conf/client/RestConfClient.py b/src/common/tools/rest_conf/client/RestConfClient.py index a44200c0f..088bb4ae1 100644 --- a/src/common/tools/rest_conf/client/RestConfClient.py +++ b/src/common/tools/rest_conf/client/RestConfClient.py @@ -33,10 +33,8 @@ class RestConfClient(RestApiClient): timeout=timeout, verify_certs=verify_certs, allow_redirects=allow_redirects, logger=logger ) - + self._restconf_version = restconf_version self._discover_base_url() - if restconf_version is not None: - self._base_url += '/{:s}'.format(restconf_version) def _discover_base_url(self) -> None: @@ -61,6 +59,8 @@ class RestConfClient(RestApiClient): if not isinstance(href, str): raise AttributeError('Attribute "links[0].href" must be a str') self._base_url = str(href).replace('//', '/') + if self._restconf_version is not None: + self._base_url += '/{:s}'.format(self._restconf_version) def get( -- GitLab From d3a7b3f9c5feb0586b6f789740fa4341ae18a98e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 14:16:02 +0000 Subject: [PATCH 299/367] Test - Tools - Mock NCE-T /NCE-FAN Ctrl - Updated SimapUpdater to always update instead of create --- .../tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py | 4 ++-- src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git 
a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py index cce0179c2..8cc114433 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/SimapUpdater.py @@ -51,7 +51,7 @@ class SimapUpdater: tp['tp-id'] for tp in node['ietf-network-topology:termination-point'] ] - te_topo.node(node_id).create(termination_point_ids=tp_ids) + te_topo.node(node_id).update(termination_point_ids=tp_ids) links = network_data.get('ietf-network-topology:link', list()) for link in links: @@ -63,6 +63,6 @@ class SimapUpdater: link_dst_node_id = link_dst['dest-node'] link_dst_tp_id = link_dst['dest-tp'] - te_topo.link(link_id).create( + te_topo.link(link_id).update( link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id ) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py index 9f8e312c2..cc792313b 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/SimapUpdater.py @@ -51,7 +51,7 @@ class SimapUpdater: tp['tp-id'] for tp in node['ietf-network-topology:termination-point'] ] - te_topo.node(node_id).create(termination_point_ids=tp_ids) + te_topo.node(node_id).update(termination_point_ids=tp_ids) links = network_data.get('ietf-network-topology:link', list()) for link in links: @@ -63,7 +63,7 @@ class SimapUpdater: link_dst_node_id = link_dst['dest-node'] link_dst_tp_id = link_dst['dest-tp'] - te_topo.link(link_id).create( + te_topo.link(link_id).update( link_src_node_id, link_src_tp_id, link_dst_node_id, link_dst_tp_id ) -- GitLab From 4d623809608bf96a9b6029b3f3ac9d3a2b006e1a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 14:57:40 +0000 Subject: [PATCH 300/367] Service component - L3NM NCE-FAN Service Handler: - Fixed generation of delete config rules --- .../service_handlers/l3nm_ncefan/ConfigRules.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py index ad44a3e8e..592f2e53c 100644 --- a/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ncefan/ConfigRules.py @@ -109,13 +109,13 @@ def setup_config_rules(service_name: str, json_settings: Dict) -> List[Dict]: def teardown_config_rules(service_name: str, json_settings: Dict) -> List[Dict]: app_flow_id : str = json_settings["app_flow_id"] - application_name : str = f"App_Flow_{app_flow_id}" + app_flow_app_name: str = f"App_Flow_{app_flow_id}" app_flow_name : str = f"App_Flow_{app_flow_id}" - qos_profile_name : str = json_settings.get("app_flow_qos_profile", "AR_VR_Gaming") + qos_profile_name : str = f"AR_VR_Gaming_{app_flow_id}" - app_flow = {"name": app_flow_name } - qos_profile = {"name": qos_profile_name} - application = {"name": application_name} + app_flow = {"name": app_flow_name } + qos_profile = {"name": qos_profile_name } + application = {"name": app_flow_app_name} app_flow_datamodel = { "huawei-nce-app-flow:app-flows": { -- GitLab From 06094d3d1acd0b29c17c5249d800ad37fcea73d6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 14:58:21 +0000 Subject: [PATCH 301/367] Simap Connector: - Fixed discovery of supporting links - Fixed stop of synthesizer workers --- src/simap_connector/service/Tools.py | 2 +- 
src/simap_connector/service/simap_updater/SimapUpdater.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py index e4ce34fe0..071af59c1 100644 --- a/src/simap_connector/service/Tools.py +++ b/src/simap_connector/service/Tools.py @@ -45,7 +45,7 @@ class SupportingLink: def discover_supporting_links(restconf_client : RestConfClient, xpath_filter : str) -> List[SupportingLink]: xpath_filter_2 = xpath_filter.replace('/simap-telemetry:simap-telemetry', '') - xpath_filter_2 = xpath_filter.replace('/simap-telemetry', '') + xpath_filter_2 = xpath_filter_2.replace('/simap-telemetry', '') xpath_data = restconf_client.get(xpath_filter_2) if not xpath_data: MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 014ea29ca..c1da22751 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -26,6 +26,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap from simap_connector.service.telemetry.worker.data.Resources import ResourceLink, Resources, SyntheticSampler +from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from simap_connector.service.telemetry.TelemetryPool import TelemetryPool from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient @@ -428,7 +429,7 @@ class EventDispatcher(BaseEventDispatcher): self._object_cache.delete(CachedEntities.LINK, link_name) worker_name = '{:s}:{:s}'.format(topology_name, link_name) - self._telemetry_pool.stop_synthesizer(worker_name) + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) MSG = 'Link Removed: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -630,7 +631,7 @@ class EventDispatcher(BaseEventDispatcher): #self._object_cache.delete(CachedEntities.SERVICE, service_uuid) #self._object_cache.delete(CachedEntities.SERVICE, service_name) - self._telemetry_pool.stop_synthesizer(domain_name) + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, domain_name) MSG = 'Logical Link Removed for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) -- GitLab From 336a6bc5b5b9d6689658912014ca032dbb730033 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 14:58:47 +0000 Subject: [PATCH 302/367] Test - Tools - Mock NCE-T Ctrl: - Fixed parsing of requests --- .../nce_t_ctrl/ResourceOsuTunnels.py | 32 +++++++++++-------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py index 7fe14208f..af5a00b6e 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/ResourceOsuTunnels.py @@ -37,13 +37,15 @@ class OsuTunnels(Resource): json_request = request.get_json() if not json_request: abort(400) if not isinstance(json_request, dict): abort(400) - if 'ietf-te:te' not in json_request: abort(400) - te_data = json_request['ietf-te:te'] - if not isinstance(te_data, dict): abort(400) - if 'tunnels' not in te_data: abort(400) - te_tunnels = 
te_data['tunnels'] - if 'tunnel' not in te_tunnels: abort(400) - osu_tunnels = te_tunnels['tunnel'] + #if 'ietf-te:te' not in json_request: abort(400) + #te_data = json_request['ietf-te:te'] + #if not isinstance(te_data, dict): abort(400) + #if 'tunnels' not in te_data: abort(400) + #te_tunnels = te_data['tunnels'] + #if 'tunnel' not in te_tunnels: abort(400) + #osu_tunnels = te_tunnels['tunnel'] + if 'ietf-te:tunnel' not in json_request: abort(400) + osu_tunnels = json_request['ietf-te:tunnel'] if not isinstance(osu_tunnels, list): abort(400) if len(osu_tunnels) != 1: abort(400) osu_tunnel = osu_tunnels[0] @@ -65,13 +67,15 @@ class OsuTunnel(Resource): json_request = request.get_json() if not json_request: abort(400) if not isinstance(json_request, dict): abort(400) - if 'ietf-te:te' not in json_request: abort(400) - te_data = json_request['ietf-te:te'] - if not isinstance(te_data, dict): abort(400) - if 'tunnels' not in te_data: abort(400) - te_tunnels = te_data['tunnels'] - if 'tunnel' not in te_tunnels: abort(400) - osu_tunnels = te_tunnels['tunnel'] + #if 'ietf-te:te' not in json_request: abort(400) + #te_data = json_request['ietf-te:te'] + #if not isinstance(te_data, dict): abort(400) + #if 'tunnels' not in te_data: abort(400) + #te_tunnels = te_data['tunnels'] + #if 'tunnel' not in te_tunnels: abort(400) + #osu_tunnels = te_tunnels['tunnel'] + if 'ietf-te:tunnel' not in json_request: abort(400) + osu_tunnels = json_request['ietf-te:tunnel'] if not isinstance(osu_tunnels, list): abort(400) if len(osu_tunnels) != 1: abort(400) osu_tunnel = osu_tunnels[0] -- GitLab From 797811901e7562359be1b15bf03ce99c0868a5ee Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 15:22:28 +0000 Subject: [PATCH 303/367] Simap Connector: - Fixed composition of name for aggregator worker --- .../service/SimapConnectorServiceServicerImpl.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 7ee0d0a87..4ee89a4a8 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -102,9 +102,7 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): topic = 'subscription.{:d}'.format(parent_subscription_id) create_kafka_topic(topic) - aggregator_name = '{:s}:{:s}'.format( - controller_id, str(parent_subscription_id) - ) + aggregator_name = str(parent_subscription_id) self._telemetry_pool.start_aggregator( aggregator_name, parent_subscription_id, aggregation_cache, topic, period ) -- GitLab From 5e4f3e0b746c159999530ace9f8807fd24ddf3d5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 15:55:46 +0000 Subject: [PATCH 304/367] Device component IETF L3VPN/Slice/NCE Driver: - Fixed data types of requests/replies --- src/device/service/DeviceServiceServicerImpl.py | 4 ++-- src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py | 2 +- src/device/service/drivers/ietf_slice/IetfSliceDriver.py | 2 +- src/device/service/drivers/nce/NCEDriver.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index d599a5467..38ede445f 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -425,10 +425,10 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): 
'SSETelemetrySubscribe', extra_details='Driver returned an unexpected number of responses: {:d}'.format(len(r)) ) sub_conf: dict = r[0] - return SSEMonitoringSubscriptionResponse(identifier=sub_conf['identifier'], uri=sub_conf['uri']) + return SSEMonitoringSubscriptionResponse(identifier=str(sub_conf['identifier']), uri=sub_conf['uri']) if config_type == SSEMonitoringSubscriptionConfig.Unsubscribe: - r = driver.UnsubscribeState([(request.identifier, 0, 0)]) + r = driver.UnsubscribeState([(str(request.identifier), 0, 0)]) if len(r) != 1: raise OperationFailedException( 'SSETelemetrySubscribe', extra_details='Driver returned an unexpected number of responses: {:d}'.format(len(r)) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 835b186a5..454ac42d2 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -266,7 +266,7 @@ class IetfL3VpnDriver(_Driver): identifier = s[0] s_data : UnsubscribedNotificationsSchema = { 'delete-subscription': { - 'identifier': identifier, + 'identifier': int(identifier), } } self._handler_subscription.unsubscribe(s_data) diff --git a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py index 0ad01846d..345d202e9 100644 --- a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -227,7 +227,7 @@ class IetfSliceDriver(_Driver): identifier = s[0] s_data : UnsubscribedNotificationsSchema = { 'delete-subscription': { - 'identifier': identifier, + 'identifier': int(identifier), } } self._handler_subscription.unsubscribe(s_data) diff --git a/src/device/service/drivers/nce/NCEDriver.py b/src/device/service/drivers/nce/NCEDriver.py index e154abffb..67dc791a2 100644 --- a/src/device/service/drivers/nce/NCEDriver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -245,7 +245,7 @@ class NCEDriver(_Driver): identifier = s[0] s_data : UnsubscribedNotificationsSchema = { 'delete-subscription': { - 'identifier': identifier, + 'identifier': int(identifier), } } self._handler_subscription.unsubscribe(s_data) -- GitLab From 5768de7212954178caf07a10c8fd1c8fff8fa60a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 16:15:12 +0000 Subject: [PATCH 305/367] Simap Connector: - Fixed insert of sub-subscriptions --- src/simap_connector/service/database/SubSubscription.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/simap_connector/service/database/SubSubscription.py b/src/simap_connector/service/database/SubSubscription.py index 340fefbc2..5b708562f 100644 --- a/src/simap_connector/service/database/SubSubscription.py +++ b/src/simap_connector/service/database/SubSubscription.py @@ -75,7 +75,10 @@ def sub_subscription_set( def callback(session : Session) -> Tuple[bool, str]: stmt = insert(SubSubscriptionModel).values([sub_subscription_data]) stmt = stmt.on_conflict_do_update( - index_elements=[SubSubscriptionModel.subscription_uuid], + index_elements=[ + SubSubscriptionModel.parent, + SubSubscriptionModel.sub_subscription_uuid, + ], set_=dict( controller_uuid = stmt.excluded.controller_uuid, datastore = stmt.excluded.datastore, -- GitLab From 78440a4208da024a0398a0843fb0722325828333 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 16:39:38 +0000 Subject: [PATCH 306/367] Simap Connector: - Disabled read timeout in collector worker - 
Fixed wrong symbol parsing reply --- .../service/telemetry/worker/CollectorWorker.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 6bbb4b47e..5e0c1a453 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -57,7 +57,8 @@ class CollectorWorker(_Worker): try: # NOTE: Trick: we set 1-second read_timeout to force the loop to give control # back and be able to check termination events. - with session.get(stream_url, stream=True, timeout=(10, 1)) as reply: + # , timeout=(10, 1) + with session.get(stream_url, stream=True) as reply: reply.raise_for_status() it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) @@ -75,7 +76,7 @@ class CollectorWorker(_Worker): self._logger.info('[run] ==> {:s}'.format(str(line))) if not line.startswith('data:'): continue - data = json.loads(data[5:]) + data = json.loads(line[5:]) if 'notification' not in data: MSG = 'Field(notification) missing: {:s}' -- GitLab From 006ff10d647084c139cb866ab77aad37ee3b61c8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 16:41:08 +0000 Subject: [PATCH 307/367] Simap Connector: - Corrected update_counter in aggregator worker --- .../service/telemetry/worker/AggregatorWorker.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py index acc978001..7ac60adb2 100644 --- a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -100,6 +100,8 @@ class AggregatorWorker(_Worker): ) kafka_producer.flush() + update_counter += 1 + # Make wait responsible to terminations iterations = int(math.ceil(self._sampling_interval / WAIT_LOOP_GRANULARITY)) for _ in range(iterations): -- GitLab From 46c1cd4edc9a7dfaaab5bd7fe15ef034be85f759 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 17:14:00 +0000 Subject: [PATCH 308/367] Simap Connector: - Corrected empty controller_uuid conditions - Fixed keys in AggregationCache - Enhanced Collector worker to support underlay controller streams and direct SIMAP polling --- .../SimapConnectorServiceServicerImpl.py | 39 ++-- .../service/database/SubSubscription.py | 1 + .../service/telemetry/TelemetryPool.py | 8 +- .../telemetry/worker/CollectorWorker.py | 193 +++++++++++------- .../telemetry/worker/data/AggregationCache.py | 5 +- 5 files changed, 152 insertions(+), 94 deletions(-) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 4ee89a4a8..31d17ce17 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -76,28 +76,35 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): sup_link_xpath_filter = supporting_link.get_xpath_filter() sup_link_xpath_filters.append(sup_link_xpath_filter) - if controller_id is not None: + if controller_id is None: + collector_name = 'SIMAP:{:s}:{:s}'.format( + str(supporting_link.network_id), str(supporting_link.link_id) + ) + target_uri = sup_link_xpath_filter + underlay_subscription_id = 0 + else: underlay_sub_id = establish_underlay_subscription( device_client, 
controller_id, sup_link_xpath_filter, period ) - collector_name = '{:s}:{:s}'.format( controller_id, str(underlay_sub_id.subscription_id) ) - self._telemetry_pool.start_collector( - collector_name, underlay_sub_id.subscription_id, controller_id, - underlay_sub_id.subscription_uri, aggregation_cache, period - ) - - sub_request = Subscription() - sub_request.datastore = datastore - sub_request.xpath_filter = sup_link_xpath_filter - sub_request.period = period - sub_subscription_set( - self._db_engine, parent_subscription_uuid, controller_id, datastore, - sup_link_xpath_filter, period, underlay_sub_id.subscription_id, - underlay_sub_id.subscription_uri - ) + target_uri = underlay_sub_id.subscription_uri + underlay_subscription_id = underlay_sub_id.subscription_id + + self._telemetry_pool.start_collector( + collector_name, controller_id, supporting_link.network_id, + supporting_link.link_id, target_uri, aggregation_cache, period + ) + + sub_request = Subscription() + sub_request.datastore = datastore + sub_request.xpath_filter = sup_link_xpath_filter + sub_request.period = period + sub_subscription_set( + self._db_engine, parent_subscription_uuid, controller_id, datastore, + sup_link_xpath_filter, period, underlay_subscription_id, target_uri + ) topic = 'subscription.{:d}'.format(parent_subscription_id) create_kafka_topic(topic) diff --git a/src/simap_connector/service/database/SubSubscription.py b/src/simap_connector/service/database/SubSubscription.py index 5b708562f..ef4160105 100644 --- a/src/simap_connector/service/database/SubSubscription.py +++ b/src/simap_connector/service/database/SubSubscription.py @@ -60,6 +60,7 @@ def sub_subscription_set( xpath_filter : str, period : float, sub_subscription_id : int, sub_subscription_uri : str ) -> str: now = datetime.datetime.now(datetime.timezone.utc) + if controller_uuid is None: controller_uuid = '' sub_subscription_data = { 'parent' : parent_subscription_uuid, 'controller_uuid' : controller_uuid, diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index 712177236..f88f9d35f 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -65,12 +65,12 @@ class TelemetryPool: def start_collector( - self, worker_name : str, subscription_id : int, controller_uuid : Optional[str], - subscription_uri : str, aggregation_cache : AggregationCache, sampling_interval : float + self, worker_name : str, controller_uuid : Optional[str], network_id : str, link_id : str, + target_uri : str, aggregation_cache : AggregationCache, sampling_interval : float ) -> None: self._start_worker( - WorkerTypeEnum.COLLECTOR, worker_name, subscription_id, controller_uuid, - subscription_uri, aggregation_cache, sampling_interval + WorkerTypeEnum.COLLECTOR, worker_name, controller_uuid, network_id, link_id, + target_uri, aggregation_cache, sampling_interval ) diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 5e0c1a453..48081b045 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -13,7 +13,7 @@ # limitations under the License. 
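+# CollectorWorker gathers telemetry for one supporting link. When the link is
+# owned by a known underlay controller it follows that controller's RESTCONF
+# notification stream; otherwise it falls back to periodically polling the
+# SIMAP server directly. Every reading is pushed into the shared
+# AggregationCache.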
-import json, requests, threading +import json, math, requests, threading, time from requests.exceptions import ReadTimeout from typing import Optional from .data.AggregationCache import AggregationCache, LinkSample @@ -29,17 +29,21 @@ CONTROLLER_TO_ADDRESS_PORT = { 'SIMAP' : ('10.254.0.9', 80), } +WAIT_LOOP_GRANULARITY = 0.5 + class CollectorWorker(_Worker): def __init__( - self, worker_name : str, subscription_id : int, controller_uuid : Optional[str], - subscription_uri : str, aggregation_cache : AggregationCache, sampling_interval : float, + self, worker_name : str, controller_uuid : Optional[str], + network_id : str, link_id : str, target_uri : str, + aggregation_cache : AggregationCache, sampling_interval : float, terminate : Optional[threading.Event] = None ) -> None: super().__init__(WorkerTypeEnum.COLLECTOR, worker_name, terminate=terminate) - self._subscription_id = subscription_id self._controller_uuid = controller_uuid - self._subscription_uri = subscription_uri + self._network_id = network_id + self._link_id = link_id + self._target_uri = target_uri self._aggregation_cache = aggregation_cache self._sampling_interval = sampling_interval @@ -48,74 +52,119 @@ class CollectorWorker(_Worker): try: address_port = CONTROLLER_TO_ADDRESS_PORT.get(self._controller_uuid) - if address_port is None: address_port = CONTROLLER_TO_ADDRESS_PORT['SIMAP'] - address, port = address_port - stream_url = 'http://{:s}:{:d}{:s}'.format(address, port, self._subscription_uri) - self._logger.info('[run] Opening stream "{:s}"...'.format(str(stream_url))) - - session = requests.Session() - try: - # NOTE: Trick: we set 1-second read_timeout to force the loop to give control - # back and be able to check termination events. - # , timeout=(10, 1) - with session.get(stream_url, stream=True) as reply: - reply.raise_for_status() - - it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) - - while not self._stop_event.is_set() and not self._terminate.is_set(): - try: - line = next(it_lines) # may block until read_timeout - except StopIteration: - break # server closed - except ReadTimeout: - continue # no data this tick; loop to check termination conditions - - if line is None: continue - if len(line) == 0: continue - - self._logger.info('[run] ==> {:s}'.format(str(line))) - if not line.startswith('data:'): continue - data = json.loads(line[5:]) - - if 'notification' not in data: - MSG = 'Field(notification) missing: {:s}' - raise Exception(MSG.format(str(data))) - notification = data['notification'] - - if 'push-update' not in notification: - MSG = 'Field(notification/push-update) missing: {:s}' - raise Exception(MSG.format(str(data))) - push_update = notification['push-update'] - - if 'datastore-contents' not in push_update: - MSG = 'Field(notification/push-update/datastore-contents) missing: {:s}' - raise Exception(MSG.format(str(data))) - datastore_contents = push_update['datastore-contents'] - - if 'simap-telemetry:simap-telemetry' not in datastore_contents: - MSG = ( - 'Field(notification/push-update/datastore-contents' - '/simap-telemetry:simap-telemetry) missing: {:s}' - ) - raise Exception(MSG.format(str(data))) - simap_telemetry = datastore_contents['simap-telemetry:simap-telemetry'] - - bandwidth_utilization = float(simap_telemetry['bandwidth-utilization']) - latency = float(simap_telemetry['latency']) - related_service_ids = simap_telemetry['related-service-ids'] - - link_sample = LinkSample( - subscription_id = self._subscription_id, - bandwidth_utilization = bandwidth_utilization, - 
latency = latency, - related_service_ids = related_service_ids, - ) - self._aggregation_cache.update(link_sample) - finally: - if session is not None: - session.close() + if address_port is None: + address, port = CONTROLLER_TO_ADDRESS_PORT['SIMAP'] + self.direct_simap_polling(address, port) + else: + address, port = address_port + self.underlay_subscription_stream(address, port) except Exception: self._logger.exception('[run] Unhandled Exception') finally: self._logger.info('[run] Terminated') + + def underlay_subscription_stream(self, address : str, port : int) -> None: + stream_url = 'http://{:s}:{:d}{:s}'.format(address, port, self._target_uri) + MSG = '[underlay_subscription_stream] Opening stream "{:s}"...' + self._logger.info(MSG.format(str(stream_url))) + + session = requests.Session() + try: + # NOTE: Trick: we set 1-second read_timeout to force the loop to give control + # back and be able to check termination events. + # , timeout=(10, 1) + with session.get(stream_url, stream=True) as reply: + reply.raise_for_status() + + it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) + + while not self._stop_event.is_set() and not self._terminate.is_set(): + try: + line = next(it_lines) # may block until read_timeout + except StopIteration: + break # server closed + except ReadTimeout: + continue # no data this tick; loop to check termination conditions + + if line is None: continue + if len(line) == 0: continue + + MSG = '[underlay_subscription_stream] ==> {:s}' + self._logger.info(MSG.format(str(line))) + if not line.startswith('data:'): continue + data = json.loads(line[5:]) + + if 'notification' not in data: + MSG = 'Field(notification) missing: {:s}' + raise Exception(MSG.format(str(data))) + notification = data['notification'] + + if 'push-update' not in notification: + MSG = 'Field(notification/push-update) missing: {:s}' + raise Exception(MSG.format(str(data))) + push_update = notification['push-update'] + + if 'datastore-contents' not in push_update: + MSG = 'Field(notification/push-update/datastore-contents) missing: {:s}' + raise Exception(MSG.format(str(data))) + datastore_contents = push_update['datastore-contents'] + + if 'simap-telemetry:simap-telemetry' not in datastore_contents: + MSG = ( + 'Field(notification/push-update/datastore-contents' + '/simap-telemetry:simap-telemetry) missing: {:s}' + ) + raise Exception(MSG.format(str(data))) + simap_telemetry = datastore_contents['simap-telemetry:simap-telemetry'] + + bandwidth_utilization = float(simap_telemetry['bandwidth-utilization']) + latency = float(simap_telemetry['latency']) + related_service_ids = simap_telemetry['related-service-ids'] + + link_sample = LinkSample( + network_id = self._network_id, + link_id = self._link_id, + bandwidth_utilization = bandwidth_utilization, + latency = latency, + related_service_ids = related_service_ids, + ) + self._aggregation_cache.update(link_sample) + finally: + if session is not None: + session.close() + + def direct_simap_polling(self, address : str, port : int) -> None: + simap_url = 'http://{:s}:{:d}{:s}'.format(address, port, self._target_uri) + + while not self._stop_event.is_set() and not self._terminate.is_set(): + MSG = '[direct_simap_polling] Requesting "{:s}"...' 
+ self._logger.info(MSG.format(str(simap_url))) + + with requests.get(simap_url, timeout=10) as reply: + reply.raise_for_status() + data = reply.json() + + if 'simap-telemetry:simap-telemetry' not in data: + MSG = 'Field(simap-telemetry:simap-telemetry) missing: {:s}' + raise Exception(MSG.format(str(data))) + simap_telemetry = data['simap-telemetry:simap-telemetry'] + + bandwidth_utilization = float(simap_telemetry['bandwidth-utilization']) + latency = float(simap_telemetry['latency']) + related_service_ids = simap_telemetry.get('related-service-ids', list()) + + link_sample = LinkSample( + network_id = self._network_id, + link_id = self._link_id, + bandwidth_utilization = bandwidth_utilization, + latency = latency, + related_service_ids = related_service_ids, + ) + self._aggregation_cache.update(link_sample) + + # Make wait responsible to terminations + iterations = int(math.ceil(self._sampling_interval / WAIT_LOOP_GRANULARITY)) + for _ in range(iterations): + if self._stop_event.is_set(): break + if self._terminate.is_set() : break + time.sleep(WAIT_LOOP_GRANULARITY) diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py index a5f811818..31a71d096 100644 --- a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -21,7 +21,8 @@ from typing import Dict, Set, Tuple @dataclass class LinkSample: - subscription_id : int + network_id : str + link_id : str bandwidth_utilization : float latency : float related_service_ids : Set[str] = field(default_factory=set) @@ -42,7 +43,7 @@ class AggregationCache: def update(self, link_sample : LinkSample) -> None: - link_key = (link_sample.domain_name, link_sample.link_name) + link_key = (link_sample.network_id, link_sample.link_id) with self._lock: self._samples[link_key] = link_sample -- GitLab From 9a4a29b93d12c30fc780bf048849b119c30b3f6d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 17:25:07 +0000 Subject: [PATCH 309/367] Simap Connector: - Corrected simap/nce-fan/nce-t TCP port --- .../service/telemetry/worker/CollectorWorker.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 48081b045..923479be9 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -24,9 +24,9 @@ CONTROLLER_TO_ADDRESS_PORT = { 'TFS-E2E' : ('10.254.0.10', 80), 'TFS-AGG' : ('10.254.0.11', 80), 'TFS-IP' : ('10.254.0.12', 80), - 'NCE-T' : ('10.254.0.9', 80), - 'NCE-FAN' : ('10.254.0.9', 80), - 'SIMAP' : ('10.254.0.9', 80), + 'NCE-T' : ('10.254.0.9', 8082), + 'NCE-FAN' : ('10.254.0.9', 8081), + 'SIMAP' : ('10.254.0.9', 8080), } WAIT_LOOP_GRANULARITY = 0.5 -- GitLab From 395772392ffa9f1f0ce047186ae0864490b3dda2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 22 Sep 2025 17:33:15 +0000 Subject: [PATCH 310/367] Simap Connector: - Corrected collector worker direct simap URL --- src/simap_connector/service/telemetry/worker/CollectorWorker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 923479be9..d9721bc85 100644 --- 
a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -134,7 +134,7 @@ class CollectorWorker(_Worker): session.close() def direct_simap_polling(self, address : str, port : int) -> None: - simap_url = 'http://{:s}:{:d}{:s}'.format(address, port, self._target_uri) + simap_url = 'http://{:s}:{:d}/restconf/data{:s}'.format(address, port, self._target_uri) while not self._stop_event.is_set() and not self._terminate.is_set(): MSG = '[direct_simap_polling] Requesting "{:s}"...' -- GitLab From 0c7d6ca291dfe74f72340be7982e98cd38097d87 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 08:59:51 +0000 Subject: [PATCH 311/367] Simap Connector: - Added collector_name to sub-subscription database model - Deactivated synthesizers for abstract simaps --- .../SimapConnectorServiceServicerImpl.py | 32 ++++++++++++------- .../service/database/SubSubscription.py | 5 ++- .../database/models/SubSubscriptionModel.py | 2 ++ .../service/simap_updater/SimapUpdater.py | 8 ++--- 4 files changed, 30 insertions(+), 17 deletions(-) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 31d17ce17..4568adc74 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -21,6 +21,7 @@ from common.proto.simap_connector_pb2_grpc import SimapConnectorServiceServicer from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from device.client.DeviceClient import DeviceClient +from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from .database.Subscription import subscription_get, subscription_set, subscription_delete from .database.SubSubscription import ( sub_subscription_list, sub_subscription_set, sub_subscription_delete @@ -77,8 +78,8 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): sup_link_xpath_filters.append(sup_link_xpath_filter) if controller_id is None: - collector_name = 'SIMAP:{:s}:{:s}'.format( - str(supporting_link.network_id), str(supporting_link.link_id) + collector_name = '{:d}:SIMAP:{:s}:{:s}'.format( + parent_subscription_id, str(supporting_link.network_id), str(supporting_link.link_id) ) target_uri = sup_link_xpath_filter underlay_subscription_id = 0 @@ -86,8 +87,8 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): underlay_sub_id = establish_underlay_subscription( device_client, controller_id, sup_link_xpath_filter, period ) - collector_name = '{:s}:{:s}'.format( - controller_id, str(underlay_sub_id.subscription_id) + collector_name = '{:d}:{:s}:{:s}'.format( + parent_subscription_id, controller_id, str(underlay_sub_id.subscription_id) ) target_uri = underlay_sub_id.subscription_uri underlay_subscription_id = underlay_sub_id.subscription_id @@ -103,7 +104,8 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): sub_request.period = period sub_subscription_set( self._db_engine, parent_subscription_uuid, controller_id, datastore, - sup_link_xpath_filter, period, underlay_subscription_id, target_uri + sup_link_xpath_filter, period, underlay_subscription_id, target_uri, + collector_name ) topic = 'subscription.{:d}'.format(parent_subscription_id) @@ -123,20 +125,26 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): 
subscription = subscription_get(self._db_engine, parent_subscription_id) if subscription is None: return Empty() - # TODO: desactivate subscription aggregator and collectors - - topic = 'subscription.{:d}'.format(parent_subscription_id) - delete_kafka_topic(topic) - - parent_subscription_uuid = subscription['subscription_uuid'] + aggregator_name = str(parent_subscription_id) + self._telemetry_pool.stop_worker(WorkerTypeEnum.AGGREGATOR, aggregator_name) device_client = DeviceClient() + parent_subscription_uuid = subscription['subscription_uuid'] sub_subscriptions = sub_subscription_list(self._db_engine, parent_subscription_uuid) for sub_subscription in sub_subscriptions: sub_subscription_id = sub_subscription['sub_subscription_id'] controller_id = sub_subscription['controller_uuid' ] - delete_underlay_subscription(device_client, controller_id, sub_subscription_id) + collector_name = sub_subscription['collector_name' ] + + self._telemetry_pool.stop_worker(WorkerTypeEnum.COLLECTOR, collector_name) + + if controller_id is not None and len(controller_id) > 0: + delete_underlay_subscription(device_client, controller_id, sub_subscription_id) + sub_subscription_delete(self._db_engine, parent_subscription_uuid, sub_subscription_id) + topic = 'subscription.{:d}'.format(parent_subscription_id) + delete_kafka_topic(topic) + subscription_delete(self._db_engine, parent_subscription_id) return Empty() diff --git a/src/simap_connector/service/database/SubSubscription.py b/src/simap_connector/service/database/SubSubscription.py index ef4160105..4c19a6c64 100644 --- a/src/simap_connector/service/database/SubSubscription.py +++ b/src/simap_connector/service/database/SubSubscription.py @@ -57,7 +57,8 @@ def sub_subscription_get( def sub_subscription_set( db_engine : Engine, parent_subscription_uuid : str, controller_uuid : str, datastore : str, - xpath_filter : str, period : float, sub_subscription_id : int, sub_subscription_uri : str + xpath_filter : str, period : float, sub_subscription_id : int, sub_subscription_uri : str, + collector_name : str ) -> str: now = datetime.datetime.now(datetime.timezone.utc) if controller_uuid is None: controller_uuid = '' @@ -69,6 +70,7 @@ def sub_subscription_set( 'period' : period, 'sub_subscription_id' : sub_subscription_id, 'sub_subscription_uri': sub_subscription_uri, + 'collector_name' : collector_name, 'created_at' : now, 'updated_at' : now, } @@ -87,6 +89,7 @@ def sub_subscription_set( period = stmt.excluded.period, sub_subscription_id = stmt.excluded.sub_subscription_id, sub_subscription_uri = stmt.excluded.sub_subscription_uri, + collector_name = stmt.excluded.collector_name, updated_at = stmt.excluded.updated_at, ) ) diff --git a/src/simap_connector/service/database/models/SubSubscriptionModel.py b/src/simap_connector/service/database/models/SubSubscriptionModel.py index 58e9a6d78..e05182457 100644 --- a/src/simap_connector/service/database/models/SubSubscriptionModel.py +++ b/src/simap_connector/service/database/models/SubSubscriptionModel.py @@ -30,6 +30,7 @@ class SubSubscriptionModel(_Base): period = Column(Float, nullable=False, unique=False) sub_subscription_id = Column(BigInteger, nullable=False, unique=False) sub_subscription_uri = Column(String, nullable=False, unique=False) + collector_name = Column(String, nullable=False, unique=False) created_at = Column(DateTime, nullable=False) updated_at = Column(DateTime, nullable=False) @@ -45,4 +46,5 @@ class SubSubscriptionModel(_Base): 'period' : self.period, 'sub_subscription_id' : self.sub_subscription_id, 
'sub_subscription_uri' : self.sub_subscription_uri, + 'collector_name' : self.collector_name, } diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index c1da22751..11b549f42 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -533,9 +533,9 @@ class EventDispatcher(BaseEventDispatcher): #dom_link.update(src_dev_name, src_ep_name, dst_dev_name, dst_ep_name) - resources = Resources() - sampling_interval = 1.0 - self._telemetry_pool.start_synthesizer(domain_name, resources, sampling_interval) + #resources = Resources() + #sampling_interval = 1.0 + #self._telemetry_pool.start_synthesizer(domain_name, resources, sampling_interval) return True @@ -631,7 +631,7 @@ class EventDispatcher(BaseEventDispatcher): #self._object_cache.delete(CachedEntities.SERVICE, service_uuid) #self._object_cache.delete(CachedEntities.SERVICE, service_name) - self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, domain_name) + #self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, domain_name) MSG = 'Logical Link Removed for Service: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(service_event))) -- GitLab From 02517bc45ceaa666b319ce8967c5337f4886f77b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 09:29:31 +0000 Subject: [PATCH 312/367] Simap Connector: - Updated AggregatorWorker to inject simap aggregated telemetry in SIMAP server - Updated SIMAP Link Detail discovery to report link details --- .../SimapConnectorServiceServicerImpl.py | 24 ++++++--- src/simap_connector/service/Tools.py | 51 +++++++++++++------ .../service/telemetry/TelemetryPool.py | 8 +-- .../telemetry/worker/AggregatorWorker.py | 15 +++++- 4 files changed, 68 insertions(+), 30 deletions(-) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 4568adc74..46fc797f2 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -29,8 +29,8 @@ from .database.SubSubscription import ( from .telemetry.worker.data.AggregationCache import AggregationCache from .telemetry.TelemetryPool import TelemetryPool from .Tools import ( - SupportingLink, create_kafka_topic, delete_kafka_topic, delete_underlay_subscription, - discover_supporting_links, establish_underlay_subscription, get_controller_id, + LinkDetails, create_kafka_topic, delete_kafka_topic, delete_underlay_subscription, + discover_link_details, establish_underlay_subscription, get_controller_id ) LOGGER = logging.getLogger(__name__) @@ -60,9 +60,10 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): datastore = request.datastore xpath_filter = request.xpath_filter period = request.period - supporting_links : List[SupportingLink] = discover_supporting_links( + link_details : LinkDetails = discover_link_details( self._restconf_client, xpath_filter ) + xpath_filter = link_details.link.get_xpath_filter() parent_subscription_uuid, parent_subscription_id = subscription_set( self._db_engine, datastore, xpath_filter, period @@ -72,14 +73,15 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): device_client = DeviceClient() sup_link_xpath_filters : List[str] = list() - for supporting_link in supporting_links: + for supporting_link in link_details.supporting_links: controller_id = 
get_controller_id(supporting_link.network_id) sup_link_xpath_filter = supporting_link.get_xpath_filter() sup_link_xpath_filters.append(sup_link_xpath_filter) if controller_id is None: collector_name = '{:d}:SIMAP:{:s}:{:s}'.format( - parent_subscription_id, str(supporting_link.network_id), str(supporting_link.link_id) + parent_subscription_id, str(supporting_link.network_id), + str(supporting_link.link_id) ) target_uri = sup_link_xpath_filter underlay_subscription_id = 0 @@ -88,7 +90,8 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): device_client, controller_id, sup_link_xpath_filter, period ) collector_name = '{:d}:{:s}:{:s}'.format( - parent_subscription_id, controller_id, str(underlay_sub_id.subscription_id) + parent_subscription_id, controller_id, + str(underlay_sub_id.subscription_id) ) target_uri = underlay_sub_id.subscription_uri underlay_subscription_id = underlay_sub_id.subscription_id @@ -112,15 +115,20 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): create_kafka_topic(topic) aggregator_name = str(parent_subscription_id) + network_id = link_details.link.network_id + link_id = link_details.link.link_id self._telemetry_pool.start_aggregator( - aggregator_name, parent_subscription_id, aggregation_cache, topic, period + aggregator_name, network_id, link_id, parent_subscription_id, + aggregation_cache, topic, period ) return SubscriptionId(subscription_id=parent_subscription_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def DeleteSubscription(self, request : SubscriptionId, context : grpc.ServicerContext) -> Empty: + def DeleteSubscription( + self, request : SubscriptionId, context : grpc.ServicerContext + ) -> Empty: parent_subscription_id = request.subscription_id subscription = subscription_get(self._db_engine, parent_subscription_id) if subscription is None: return Empty() diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py index 071af59c1..af532a370 100644 --- a/src/simap_connector/service/Tools.py +++ b/src/simap_connector/service/Tools.py @@ -13,8 +13,8 @@ # limitations under the License. 
-import logging -from dataclasses import dataclass +import logging, re +from dataclasses import dataclass, field from kafka.admin import KafkaAdminClient, NewTopic from kafka.errors import BrokerResponseError from typing import List, Optional @@ -34,8 +34,13 @@ XPATH_LINK_TEMPLATE = ( '/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' ) +RE_XPATH_LINK = re.compile( + r'^/ietf-network:networks/network=([^/]+)/ietf-network-topology:link=([^/]+)/?.*$' +) + + @dataclass -class SupportingLink: +class Link: network_id : str link_id : str @@ -43,30 +48,44 @@ class SupportingLink: return XPATH_LINK_TEMPLATE.format(self.network_id, self.link_id) -def discover_supporting_links(restconf_client : RestConfClient, xpath_filter : str) -> List[SupportingLink]: - xpath_filter_2 = xpath_filter.replace('/simap-telemetry:simap-telemetry', '') - xpath_filter_2 = xpath_filter_2.replace('/simap-telemetry', '') - xpath_data = restconf_client.get(xpath_filter_2) +@dataclass +class LinkDetails: + link : Link + supporting_links : List[Link] = field(default_factory=list) + + +def discover_link_details(restconf_client : RestConfClient, xpath_filter : str) -> LinkDetails: + link_xpath_match = RE_XPATH_LINK.match(xpath_filter) + if link_xpath_match is None: + raise Exception('Unsupported xpath_filter({:s})'.format(str(xpath_filter))) + + network_id, link_id = link_xpath_match.groups() + link_details = LinkDetails(Link(network_id, link_id)) + + xpath_filter = link_details.link.get_xpath_filter() + xpath_data = restconf_client.get(xpath_filter) if not xpath_data: - MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' - raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_2))) + raise Exception('Resource({:s}) not found in SIMAP Server'.format(str(xpath_filter))) links = xpath_data.get('ietf-network-topology:link', list()) if len(links) == 0: - raise Exception('Link({:s}) not found'.format(str(xpath_filter_2))) + raise Exception('Link({:s}) not found'.format(str(xpath_filter))) if len(links) > 1: - raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter_2))) + raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter))) link = links[0] + if link['link-id'] != link_id: + MSG = 'Retieved Link({:s}) does not match xpath_filter({:s})' + raise Exception(MSG.format(str(link), str(xpath_filter))) supporting_links = link.get('supporting-link', list()) if len(supporting_links) == 0: MSG = 'No supporting links found for Resource({:s}, {:s})' raise Exception(MSG.format(str(xpath_filter), str(xpath_data))) - supporting_link_xpaths : List[SupportingLink] = [ - SupportingLink(sup_link['network-ref'], sup_link['link-ref']) - for sup_link in supporting_links - ] - return supporting_link_xpaths + for sup_link in supporting_links: + link_details.supporting_links.append(Link( + sup_link['network-ref'], sup_link['link-ref'] + )) + return link_details #def compose_establish_subscription(datastore : str, xpath_filter : str, period : float) -> Dict: diff --git a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index f88f9d35f..8f99dbb42 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -55,12 +55,12 @@ class TelemetryPool: def start_aggregator( - self, worker_name : str, parent_subscription_id : int, aggregation_cache : AggregationCache, - topic : str, sampling_interval : float + self, worker_name : str, network_id : str, link_id : 
str, parent_subscription_id : int, + aggregation_cache : AggregationCache, topic : str, sampling_interval : float ) -> None: self._start_worker( - WorkerTypeEnum.AGGREGATOR, worker_name, parent_subscription_id, aggregation_cache, - topic, sampling_interval + WorkerTypeEnum.AGGREGATOR, worker_name, self._simap_client, network_id, link_id, + parent_subscription_id, aggregation_cache, topic, sampling_interval ) diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py index 7ac60adb2..653382b7f 100644 --- a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -18,6 +18,7 @@ from dataclasses import dataclass from kafka import KafkaProducer from typing import Dict, Optional, Union from common.tools.kafka.Variables import KafkaConfig +from simap_connector.service.simap_updater.SimapClient import SimapClient from .data.AggregationCache import AggregationCache from ._Worker import _Worker, WorkerTypeEnum @@ -55,10 +56,14 @@ class ServerSentEvent: class AggregatorWorker(_Worker): def __init__( - self, worker_name : str, parent_subscription_id : int, aggregation_cache : AggregationCache, - topic : str, sampling_interval : float, terminate : Optional[threading.Event] = None + self, worker_name : str, simap_client : SimapClient, network_id : str, link_id : str, + parent_subscription_id : int, aggregation_cache : AggregationCache, topic : str, + sampling_interval : float, terminate : Optional[threading.Event] = None ) -> None: super().__init__(WorkerTypeEnum.AGGREGATOR, worker_name, terminate=terminate) + self._simap_client = simap_client + self._network_id = network_id + self._link_id = link_id self._parent_subscription_id = parent_subscription_id self._aggregation_cache = aggregation_cache self._topic = topic @@ -100,6 +105,12 @@ class AggregatorWorker(_Worker): ) kafka_producer.flush() + simap_link = self._simap_client.network(self._network_id).link(self._link_id) + simap_link.telemetry.update( + link_sample.bandwidth_utilization, link_sample.latency, + related_service_ids=list(link_sample.related_service_ids) + ) + update_counter += 1 # Make wait responsible to terminations -- GitLab From 18add942e93cd241a1454465d64bf0fc3e7babf1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 10:05:56 +0000 Subject: [PATCH 313/367] Simap Connector: - Fixed generation of xpath filters after dscovery of link details --- .../service/SimapConnectorServiceServicerImpl.py | 7 ++----- src/simap_connector/service/Tools.py | 14 +++++++------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 46fc797f2..9fb38abcc 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -14,7 +14,6 @@ import grpc, logging, sqlalchemy -from typing import List from common.proto.context_pb2 import Empty from common.proto.simap_connector_pb2 import Subscription, SubscriptionId from common.proto.simap_connector_pb2_grpc import SimapConnectorServiceServicer @@ -63,7 +62,7 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): link_details : LinkDetails = discover_link_details( self._restconf_client, xpath_filter ) - xpath_filter = link_details.link.get_xpath_filter() + xpath_filter = 
link_details.link.get_xpath_filter(add_simap_telemetry=True) parent_subscription_uuid, parent_subscription_id = subscription_set( self._db_engine, datastore, xpath_filter, period @@ -72,11 +71,9 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): aggregation_cache = AggregationCache() device_client = DeviceClient() - sup_link_xpath_filters : List[str] = list() for supporting_link in link_details.supporting_links: controller_id = get_controller_id(supporting_link.network_id) - sup_link_xpath_filter = supporting_link.get_xpath_filter() - sup_link_xpath_filters.append(sup_link_xpath_filter) + sup_link_xpath_filter = supporting_link.get_xpath_filter(add_simap_telemetry=True) if controller_id is None: collector_name = '{:d}:SIMAP:{:s}:{:s}'.format( diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py index af532a370..953ea25c0 100644 --- a/src/simap_connector/service/Tools.py +++ b/src/simap_connector/service/Tools.py @@ -29,10 +29,8 @@ from device.client.DeviceClient import DeviceClient LOGGER = logging.getLogger(__name__) -XPATH_LINK_TEMPLATE = ( - '/ietf-network:networks/network={:s}' - '/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' -) +XPATH_LINK_TEMPLATE = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}' +SIMAP_TELEMETRY_SUFFIX = '/simap-telemetry:simap-telemetry' RE_XPATH_LINK = re.compile( r'^/ietf-network:networks/network=([^/]+)/ietf-network-topology:link=([^/]+)/?.*$' @@ -44,8 +42,10 @@ class Link: network_id : str link_id : str - def get_xpath_filter(self) -> str: - return XPATH_LINK_TEMPLATE.format(self.network_id, self.link_id) + def get_xpath_filter(self, add_simap_telemetry : bool = True) -> str: + xpath_filter = XPATH_LINK_TEMPLATE.format(self.network_id, self.link_id) + if add_simap_telemetry: xpath_filter += SIMAP_TELEMETRY_SUFFIX + return xpath_filter @dataclass @@ -62,7 +62,7 @@ def discover_link_details(restconf_client : RestConfClient, xpath_filter : str) network_id, link_id = link_xpath_match.groups() link_details = LinkDetails(Link(network_id, link_id)) - xpath_filter = link_details.link.get_xpath_filter() + xpath_filter = link_details.link.get_xpath_filter(add_simap_telemetry=False) xpath_data = restconf_client.get(xpath_filter) if not xpath_data: raise Exception('Resource({:s}) not found in SIMAP Server'.format(str(xpath_filter))) -- GitLab From e1f109157d8035e0535893465c60630f0791be62 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 10:07:22 +0000 Subject: [PATCH 314/367] Simap Connector: - Corrected random smple generator parameters --- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 11b549f42..5396ded0e 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -342,7 +342,7 @@ class EventDispatcher(BaseEventDispatcher): resources.links.append(ResourceLink( domain_name=topology_name, link_name=link_name, bandwidth_utilization_sampler=SyntheticSampler.create_random( - amplitude_scale = 45.0, + amplitude_scale = 35.0, phase_scale = 1e-7, period_scale = 86_400, offset_scale = 50, -- GitLab From eef3210d8bd00331a17d85e784dcbffae6db5ea3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 10:36:46 +0000 Subject: [PATCH 315/367] Simap Connector: - Fixed formatting of 
delete underlay subscription --- src/simap_connector/service/Tools.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py index 953ea25c0..024f8d708 100644 --- a/src/simap_connector/service/Tools.py +++ b/src/simap_connector/service/Tools.py @@ -147,7 +147,7 @@ def delete_underlay_subscription( sse_unsub_req = SSEMonitoringSubscriptionConfig() sse_unsub_req.device_id.device_uuid.uuid = controller_uuid sse_unsub_req.config_type = SSEMonitoringSubscriptionConfig.Unsubscribe - sse_unsub_req.identifier = subscription_id + sse_unsub_req.identifier = str(subscription_id) device_client.SSETelemetrySubscribe(sse_unsub_req) -- GitLab From 78afa02c1a622875af03e5095de8bdad9d116525 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 11:10:00 +0000 Subject: [PATCH 316/367] Device component - IETF L3VPN/Slice and NCE Drivers: - Fixed format of delete-subscription requests - Fixed return types from establish-subscription requests --- src/device/service/DeviceServiceServicerImpl.py | 2 +- .../service/drivers/ietf_l3vpn/IetfL3VpnDriver.py | 4 ++-- .../ietf_l3vpn/handlers/SubscriptionHandler.py | 4 ++-- .../service/drivers/ietf_slice/IetfSliceDriver.py | 4 ++-- .../ietf_slice/handlers/SubscriptionHandler.py | 4 ++-- src/device/service/drivers/nce/NCEDriver.py | 14 ++++++++++++-- .../drivers/nce/handlers/SubscriptionHandler.py | 4 ++-- 7 files changed, 23 insertions(+), 13 deletions(-) diff --git a/src/device/service/DeviceServiceServicerImpl.py b/src/device/service/DeviceServiceServicerImpl.py index 38ede445f..80ec6ae4d 100644 --- a/src/device/service/DeviceServiceServicerImpl.py +++ b/src/device/service/DeviceServiceServicerImpl.py @@ -425,7 +425,7 @@ class DeviceServiceServicerImpl(DeviceServiceServicer): 'SSETelemetrySubscribe', extra_details='Driver returned an unexpected number of responses: {:d}'.format(len(r)) ) sub_conf: dict = r[0] - return SSEMonitoringSubscriptionResponse(identifier=str(sub_conf['identifier']), uri=sub_conf['uri']) + return SSEMonitoringSubscriptionResponse(identifier=str(sub_conf['id']), uri=sub_conf['uri']) if config_type == SSEMonitoringSubscriptionConfig.Unsubscribe: r = driver.UnsubscribeState([(str(request.identifier), 0, 0)]) diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 454ac42d2..c1cf51536 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -265,8 +265,8 @@ class IetfL3VpnDriver(_Driver): s = subscriptions[0] identifier = s[0] s_data : UnsubscribedNotificationsSchema = { - 'delete-subscription': { - 'identifier': int(identifier), + 'ietf-subscribed-notifications:input': { + 'id': int(identifier), } } self._handler_subscription.unsubscribe(s_data) diff --git a/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py b/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py index 83d62880d..0b7ffdfe1 100644 --- a/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/ietf_l3vpn/handlers/SubscriptionHandler.py @@ -35,10 +35,10 @@ SubscribedNotificationsSchema = TypedDict( 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} ) -SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) +SubscriptionSchema = TypedDict('SubscriptionSchema', {'id': str}) UnsubscribedNotificationsSchema = 
TypedDict( - 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} + 'UnsubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': SubscriptionSchema} ) diff --git a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py index 345d202e9..269ad3f0c 100644 --- a/src/device/service/drivers/ietf_slice/IetfSliceDriver.py +++ b/src/device/service/drivers/ietf_slice/IetfSliceDriver.py @@ -226,8 +226,8 @@ class IetfSliceDriver(_Driver): s = subscriptions[0] identifier = s[0] s_data : UnsubscribedNotificationsSchema = { - 'delete-subscription': { - 'identifier': int(identifier), + 'ietf-subscribed-notifications:input': { + 'id': int(identifier), } } self._handler_subscription.unsubscribe(s_data) diff --git a/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py b/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py index 83d62880d..0b7ffdfe1 100644 --- a/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/ietf_slice/handlers/SubscriptionHandler.py @@ -35,10 +35,10 @@ SubscribedNotificationsSchema = TypedDict( 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} ) -SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) +SubscriptionSchema = TypedDict('SubscriptionSchema', {'id': str}) UnsubscribedNotificationsSchema = TypedDict( - 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} + 'UnsubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': SubscriptionSchema} ) diff --git a/src/device/service/drivers/nce/NCEDriver.py b/src/device/service/drivers/nce/NCEDriver.py index 67dc791a2..4e40ab7af 100644 --- a/src/device/service/drivers/nce/NCEDriver.py +++ b/src/device/service/drivers/nce/NCEDriver.py @@ -73,6 +73,7 @@ class NCEDriver(_Driver): endpoint_resources.append(endpoint_resource) self._set_initial_config(endpoint_resources) + def _set_initial_config(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: chk_type('resources', resources, list) if len(resources) == 0: @@ -107,6 +108,7 @@ class NCEDriver(_Driver): results.append(True) return results + def Connect(self) -> bool: with self.__lock: if self.__started.is_set(): return True @@ -122,16 +124,19 @@ class NCEDriver(_Driver): self.__started.set() return True + def Disconnect(self) -> bool: with self.__lock: self.__terminate.set() return True + @metered_subclass_method(METRICS_POOL) def GetInitialConfig(self) -> List[Tuple[str, Any]]: with self.__lock: return [] + @metered_subclass_method(METRICS_POOL) def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: chk_type('resources', resource_keys, list) @@ -173,6 +178,7 @@ class NCEDriver(_Driver): return results return results + @metered_subclass_method(METRICS_POOL) def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: results = [] @@ -193,6 +199,7 @@ class NCEDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: LOGGER.debug('[DeleteConfig] resources={:s}'.format(str(resources))) @@ -215,6 +222,7 @@ class NCEDriver(_Driver): results.append((resource_key, e)) return results + @metered_subclass_method(METRICS_POOL) def SubscribeState( self, subscriptions: List[Tuple[str, float, float]] @@ -235,6 
+243,7 @@ class NCEDriver(_Driver): s_id = self._handler_subscription.subscribe(s_data) return [s_id] + @metered_subclass_method(METRICS_POOL) def UnsubscribeState( self, subscriptions: List[Tuple[str, float, float]] @@ -244,13 +253,14 @@ class NCEDriver(_Driver): s = subscriptions[0] identifier = s[0] s_data : UnsubscribedNotificationsSchema = { - 'delete-subscription': { - 'identifier': int(identifier), + 'ietf-subscribed-notifications:input': { + 'id': int(identifier), } } self._handler_subscription.unsubscribe(s_data) return [True] + def GetState( self, blocking=False, terminate: Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: diff --git a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py index 83d62880d..0b7ffdfe1 100644 --- a/src/device/service/drivers/nce/handlers/SubscriptionHandler.py +++ b/src/device/service/drivers/nce/handlers/SubscriptionHandler.py @@ -35,10 +35,10 @@ SubscribedNotificationsSchema = TypedDict( 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} ) -SubscriptionSchema = TypedDict('SubscriptionSchema', {'identifier': str}) +SubscriptionSchema = TypedDict('SubscriptionSchema', {'id': str}) UnsubscribedNotificationsSchema = TypedDict( - 'UnsubscribedNotificationsSchema', {'delete-subscription': SubscriptionSchema} + 'UnsubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': SubscriptionSchema} ) -- GitLab From 31c2071ebfebe746767c76f3c146239f037a2f96 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 11:10:25 +0000 Subject: [PATCH 317/367] NBI component - SSE Telemetry: - Fixed return type of establish-subscription --- src/nbi/service/sse_telemetry/EstablishSubscription.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/nbi/service/sse_telemetry/EstablishSubscription.py b/src/nbi/service/sse_telemetry/EstablishSubscription.py index 9bb68fcf4..46882b5bb 100644 --- a/src/nbi/service/sse_telemetry/EstablishSubscription.py +++ b/src/nbi/service/sse_telemetry/EstablishSubscription.py @@ -93,7 +93,7 @@ class EstablishSubscription(Resource): subscription_id = subscription_id.subscription_id subscription_uri = url_for('sse.stream', subscription_id=subscription_id) - sub_id = {'identifier': subscription_id, 'uri': subscription_uri} + sub_id = {'id': subscription_id, 'uri': subscription_uri} return jsonify(sub_id) -- GitLab From 5e431a76c06f569ffd0daff713df03ec5c3ab121 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 11:10:47 +0000 Subject: [PATCH 318/367] ECOC F5GA Telemetry Demo: - Added tear-down script --- .../telemetry-delete-slice1.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py diff --git a/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py b/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py new file mode 100644 index 000000000..bb32901a5 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py @@ -0,0 +1,46 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import requests +from requests.auth import HTTPBasicAuth + + +RESTCONF_ADDRESS = '127.0.0.1' +RESTCONF_PORT = 80 +TELEMETRY_ID = 1109367143430094849 + +UNSUBSCRIBE_URI = '/restconf/operations/subscriptions:delete-subscription' +UNSUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, UNSUBSCRIBE_URI) +REQUEST = { + 'ietf-subscribed-notifications:input': { + 'id': TELEMETRY_ID, + } +} + + +def main() -> None: + print('[E2E] Delete Telemetry slice1...') + headers = {'accept': 'application/json'} + auth = HTTPBasicAuth('admin', 'admin') + print(UNSUBSCRIBE_URL) + print(REQUEST) + reply = requests.post( + UNSUBSCRIBE_URL, headers=headers, json=REQUEST, auth=auth, + verify=False, allow_redirects=True, timeout=30 + ) + reply.raise_for_status() + +if __name__ == '__main__': + main() -- GitLab From cac5074d44f3aef2738ec8882017ccbfdddec4df Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 12:31:57 +0000 Subject: [PATCH 319/367] Simap Connector and NBI component: - Added method AffectSampleSynthesizer - Reduced base value for synthesized bandwidth samples --- proto/simap_connector.proto | 12 +++- .../sse_telemetry/AffectSampleSynthesizer.py | 60 +++++++++++++++++++ src/nbi/service/sse_telemetry/__init__.py | 8 ++- .../client/SimapConnectorClient.py | 9 ++- .../SimapConnectorServiceServicerImpl.py | 24 +++++++- .../service/simap_updater/SimapUpdater.py | 4 +- .../service/telemetry/TelemetryPool.py | 10 ++++ .../telemetry/worker/SynthesizerWorker.py | 10 +++- 8 files changed, 129 insertions(+), 8 deletions(-) create mode 100644 src/nbi/service/sse_telemetry/AffectSampleSynthesizer.py diff --git a/proto/simap_connector.proto b/proto/simap_connector.proto index 3b966dbf6..498c871b4 100644 --- a/proto/simap_connector.proto +++ b/proto/simap_connector.proto @@ -20,8 +20,9 @@ import "context.proto"; // Subscription handling according to https://datatracker.ietf.org/doc/html/rfc8641 service SimapConnectorService { - rpc EstablishSubscription(Subscription ) returns (SubscriptionId) {} - rpc DeleteSubscription (SubscriptionId) returns (context.Empty ) {} + rpc EstablishSubscription (Subscription ) returns (SubscriptionId) {} + rpc DeleteSubscription (SubscriptionId) returns (context.Empty ) {} + rpc AffectSampleSynthesizer(Affectation ) returns (context.Empty ) {} } message SubscriptionId { @@ -33,3 +34,10 @@ message Subscription { string xpath_filter = 2; float period = 3; } + +message Affectation { + string network_id = 1; + string link_id = 2; + float bandwidth_factor = 3; + float latency_factor = 4; +} diff --git a/src/nbi/service/sse_telemetry/AffectSampleSynthesizer.py b/src/nbi/service/sse_telemetry/AffectSampleSynthesizer.py new file mode 100644 index 000000000..c201d233b --- /dev/null +++ b/src/nbi/service/sse_telemetry/AffectSampleSynthesizer.py @@ -0,0 +1,60 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from flask import jsonify, request +from flask_restful import Resource +from werkzeug.exceptions import BadRequest, UnsupportedMediaType +from common.proto.simap_connector_pb2 import Affectation +from simap_connector.client.SimapConnectorClient import SimapConnectorClient + + +LOGGER = logging.getLogger(__name__) + + +class AffectSampleSynthesizer(Resource): + # @HTTP_AUTH.login_required + def post(self): + if not request.is_json: + raise UnsupportedMediaType('JSON payload is required') + + request_data = request.json + LOGGER.debug('[post] Affectation request: {:s}'.format(str(request_data))) + + if 'network_id' not in request_data: + raise BadRequest('Missing field(network_id)') + network_id = str(request_data['network_id']) + + if 'link_id' not in request_data: + raise BadRequest('Missing field(link_id)') + link_id = str(request_data['link_id']) + + if 'bandwidth_factor' not in request_data: + raise BadRequest('Missing field(bandwidth_factor)') + bandwidth_factor = float(request_data['bandwidth_factor']) + + if 'latency_factor' not in request_data: + raise BadRequest('Missing field(latency_factor)') + latency_factor = float(request_data['latency_factor']) + + affectation = Affectation() + affectation.network_id = network_id + affectation.link_id = link_id + affectation.bandwidth_factor = bandwidth_factor + affectation.latency_factor = latency_factor + + simap_connector_client = SimapConnectorClient() + simap_connector_client.AffectSampleSynthesizer(affectation) + return jsonify({}) diff --git a/src/nbi/service/sse_telemetry/__init__.py b/src/nbi/service/sse_telemetry/__init__.py index 5aa699588..d9bb07d96 100644 --- a/src/nbi/service/sse_telemetry/__init__.py +++ b/src/nbi/service/sse_telemetry/__init__.py @@ -21,11 +21,11 @@ from nbi.service.NbiApplication import NbiApplication +from .AffectSampleSynthesizer import AffectSampleSynthesizer from .EstablishSubscription import EstablishSubscription from .DeleteSubscription import DeleteSubscription from .StreamSubscription import StreamSubscription - def register_telemetry_subscription(nbi_app: NbiApplication): nbi_app.add_rest_api_resource( EstablishSubscription, @@ -45,3 +45,9 @@ def register_telemetry_subscription(nbi_app: NbiApplication): '/restconf/stream//', endpoint='sse.stream', ) + nbi_app.add_rest_api_resource( + AffectSampleSynthesizer, + '/affect_sample_synthesizer', + '/affect_sample_synthesizer/', + endpoint='sse.affect_sample_synthesizer', + ) diff --git a/src/simap_connector/client/SimapConnectorClient.py b/src/simap_connector/client/SimapConnectorClient.py index 3b9f941cc..137f2fb5b 100644 --- a/src/simap_connector/client/SimapConnectorClient.py +++ b/src/simap_connector/client/SimapConnectorClient.py @@ -16,7 +16,7 @@ import grpc, logging from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc from common.proto.context_pb2 import Empty -from common.proto.simap_connector_pb2 import Subscription, SubscriptionId +from common.proto.simap_connector_pb2 import Affectation, Subscription, SubscriptionId from common.proto.simap_connector_pb2_grpc 
import SimapConnectorServiceStub from common.tools.client.RetryDecorator import delay_exponential, retry from common.tools.grpc.Tools import grpc_message_to_json_string @@ -62,3 +62,10 @@ class SimapConnectorClient: response = self.stub.DeleteSubscription(request) LOGGER.debug('DeleteSubscription result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def AffectSampleSynthesizer(self, request : Affectation) -> Empty: + LOGGER.debug('AffectSampleSynthesizer request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.AffectSampleSynthesizer(request) + LOGGER.debug('AffectSampleSynthesizer result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 9fb38abcc..8aafffc1a 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -14,12 +14,14 @@ import grpc, logging, sqlalchemy +from typing import Optional from common.proto.context_pb2 import Empty -from common.proto.simap_connector_pb2 import Subscription, SubscriptionId +from common.proto.simap_connector_pb2 import Affectation, Subscription, SubscriptionId from common.proto.simap_connector_pb2_grpc import SimapConnectorServiceServicer from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from device.client.DeviceClient import DeviceClient +from simap_connector.service.telemetry.worker.SynthesizerWorker import SynthesizerWorker from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from .database.Subscription import subscription_get, subscription_set, subscription_delete from .database.SubSubscription import ( @@ -153,3 +155,23 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): subscription_delete(self._db_engine, parent_subscription_id) return Empty() + + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def AffectSampleSynthesizer( + self, request : Affectation, context : grpc.ServicerContext + ) -> Empty: + network_id = request.network_id + link_id = request.link_id + bandwidth_factor = request.bandwidth_factor + latency_factor = request.latency_factor + + synthesizer_name = '{:s}:{:s}'.format(network_id, link_id) + synthesizer : Optional[SynthesizerWorker] = self._telemetry_pool.get_worker( + WorkerTypeEnum.SYNTHESIZER, synthesizer_name + ) + if synthesizer is None: + MSG = 'Synthesizer({:s}) not found' + raise Exception(MSG.format(synthesizer_name)) + synthesizer.change_resources(bandwidth_factor, latency_factor) + return Empty() diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 5396ded0e..d894dea65 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -342,10 +342,10 @@ class EventDispatcher(BaseEventDispatcher): resources.links.append(ResourceLink( domain_name=topology_name, link_name=link_name, bandwidth_utilization_sampler=SyntheticSampler.create_random( - amplitude_scale = 35.0, + amplitude_scale = 25.0, phase_scale = 1e-7, period_scale = 86_400, - offset_scale = 50, + offset_scale = 25, noise_ratio = 0.05, min_value = 0.0, max_value = 100.0, diff --git 
a/src/simap_connector/service/telemetry/TelemetryPool.py b/src/simap_connector/service/telemetry/TelemetryPool.py index 8f99dbb42..8486642f3 100644 --- a/src/simap_connector/service/telemetry/TelemetryPool.py +++ b/src/simap_connector/service/telemetry/TelemetryPool.py @@ -54,6 +54,16 @@ class TelemetryPool: return worker_key in self._workers + def get_worker(self, worker_type : WorkerTypeEnum, worker_name : str) -> Optional[_Worker]: + worker_key = get_worker_key(worker_type, worker_name) + return self.get_worker_by_key(worker_key) + + + def get_worker_by_key(self, worker_key : str) -> Optional[_Worker]: + with self._lock: + return self._workers.get(worker_key) + + def start_aggregator( self, worker_name : str, network_id : str, link_id : str, parent_subscription_id : int, aggregation_cache : AggregationCache, topic : str, sampling_interval : float diff --git a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py index 575f4c9bf..7edc01af7 100644 --- a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py +++ b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py @@ -29,10 +29,17 @@ class SynthesizerWorker(_Worker): sampling_interval : float, terminate : Optional[threading.Event] = None ) -> None: super().__init__(WorkerTypeEnum.SYNTHESIZER, worker_name, terminate=terminate) + self._lock = threading.Lock() self._simap_client = simap_client self._resources = resources self._sampling_interval = sampling_interval + def change_resources(self, bandwidth_factor : float, latency_factor : float) -> None: + with self._lock: + for link in self._resources.links: + link.bandwidth_utilization_sampler.offset *= bandwidth_factor + link.latency_sampler.offset *= latency_factor + def run(self) -> None: self._logger.info('[run] Starting...') @@ -40,7 +47,8 @@ class SynthesizerWorker(_Worker): while not self._stop_event.is_set() and not self._terminate.is_set(): self._logger.info('[run] Sampling...') - self._resources.generate_samples(self._simap_client) + with self._lock: + self._resources.generate_samples(self._simap_client) # Make wait responsible to terminations iterations = int(math.ceil(self._sampling_interval / WAIT_LOOP_GRANULARITY)) -- GitLab From f3289830ac0623f477a1c723074fda7ad965989a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 17:30:22 +0000 Subject: [PATCH 320/367] Simap Connector component: - Corrected logic to delegate SIMAP physical links and associated synthesizers to appropriate controllers --- .../service/simap_updater/AllowedLinks.py | 20 +++++++++++++++++++ .../service/simap_updater/SimapUpdater.py | 12 ++++++++--- 2 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 src/simap_connector/service/simap_updater/AllowedLinks.py diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py new file mode 100644 index 000000000..4758eebbf --- /dev/null +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -0,0 +1,20 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +ALLOWED_LINKS_PER_CONTROLLER = { + 'e2e' : {'L1', 'L2', 'L3', 'L4'}, + 'agg' : {'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', 'L11ba', + 'L12ab', 'L12ba', 'L13', 'L14'}, + 'trans-pkt': {'L5', 'L6', 'L9', 'L10'}, +} diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index d894dea65..afc197268 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -24,13 +24,16 @@ from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient -from simap_connector.service.simap_updater.MockSimaps import delete_mock_simap, set_mock_simap -from simap_connector.service.telemetry.worker.data.Resources import ResourceLink, Resources, SyntheticSampler +from simap_connector.service.telemetry.worker.data.Resources import ( + ResourceLink, Resources, SyntheticSampler +) from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from simap_connector.service.telemetry.TelemetryPool import TelemetryPool +from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER +from .MockSimaps import delete_mock_simap, set_mock_simap from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient -from .Tools import get_device_endpoint, get_link_endpoint, get_service_endpoint +from .Tools import get_device_endpoint, get_link_endpoint #, get_service_endpoint LOGGER = logging.getLogger(__name__) @@ -290,6 +293,9 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() + allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(topology_name) + if link_name not in allowed_link_names: return False + src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True ) src_endpoint = self._object_cache.get(CachedEntities.ENDPOINT, *(endpoint_uuids[0]), auto_retrieve=False) dst_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[1][0], force_update =True ) -- GitLab From e65cc89df13fbe7c84314c95ee15d6b359fdca08 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 17:32:05 +0000 Subject: [PATCH 321/367] ECOC F5GA Telemetry Demo: - Updated scripts to trigger activate/terminate telemetry --- src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py | 2 +- ...scribe_telemetry_slice1.py => telemetry-subscribe-slice1.py} | 0 2 files changed, 1 insertion(+), 1 deletion(-) rename src/tests/ecoc25-f5ga-telemetry/{subscribe_telemetry_slice1.py => telemetry-subscribe-slice1.py} (100%) diff --git a/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py b/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py index bb32901a5..b2924e1b2 100644 --- a/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py +++ b/src/tests/ecoc25-f5ga-telemetry/telemetry-delete-slice1.py @@ -19,7 +19,7 @@ from requests.auth import HTTPBasicAuth 
RESTCONF_ADDRESS = '127.0.0.1' RESTCONF_PORT = 80 -TELEMETRY_ID = 1109367143430094849 +TELEMETRY_ID = 1109405947767160833 UNSUBSCRIBE_URI = '/restconf/operations/subscriptions:delete-subscription' UNSUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, UNSUBSCRIBE_URI) diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py similarity index 100% rename from src/tests/ecoc25-f5ga-telemetry/subscribe_telemetry_slice1.py rename to src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py -- GitLab From dae5e4f607289b4cc03d719b07eb55e8efb0f5e6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 17:32:52 +0000 Subject: [PATCH 322/367] Test - Tools - Traffic Changer GUI: - Implemented traffic changer GUI --- src/tests/tools/traffic_changer/Dockerfile | 46 ++++++ src/tests/tools/traffic_changer/app.py | 137 ++++++++++++++++++ src/tests/tools/traffic_changer/build.sh | 22 +++ src/tests/tools/traffic_changer/deploy.sh | 28 ++++ src/tests/tools/traffic_changer/destroy.sh | 23 +++ .../tools/traffic_changer/requirements.in | 20 +++ .../templates/affect_form.html | 57 ++++++++ 7 files changed, 333 insertions(+) create mode 100644 src/tests/tools/traffic_changer/Dockerfile create mode 100644 src/tests/tools/traffic_changer/app.py create mode 100644 src/tests/tools/traffic_changer/build.sh create mode 100644 src/tests/tools/traffic_changer/deploy.sh create mode 100644 src/tests/tools/traffic_changer/destroy.sh create mode 100644 src/tests/tools/traffic_changer/requirements.in create mode 100644 src/tests/tools/traffic_changer/templates/affect_form.html diff --git a/src/tests/tools/traffic_changer/Dockerfile b/src/tests/tools/traffic_changer/Dockerfile new file mode 100644 index 000000000..9b590eb5e --- /dev/null +++ b/src/tests/tools/traffic_changer/Dockerfile @@ -0,0 +1,46 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +FROM python:3.9-slim + +## Install dependencies +#RUN apt-get --yes --quiet --quiet update && \ +# apt-get --yes --quiet --quiet install --no-install-recommends build-essential && \ +# rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get specific Python packages +RUN mkdir -p /var/teraflow/ +WORKDIR /var/teraflow/ +COPY src/tests/tools/traffic_changer/requirements.in ./requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Get component files +COPY src/tests/tools/traffic_changer/templates ./traffic_changer/templates +COPY src/tests/tools/traffic_changer/*.py ./traffic_changer/ + +# Configure Flask for production +ENV FLASK_ENV="production" + +# Start the service +ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "traffic_changer.app:app"] diff --git a/src/tests/tools/traffic_changer/app.py b/src/tests/tools/traffic_changer/app.py new file mode 100644 index 000000000..44a180dc1 --- /dev/null +++ b/src/tests/tools/traffic_changer/app.py @@ -0,0 +1,137 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import enum, json, logging, requests, secrets, time +from flask import Flask, render_template, request, flash + + +logging.basicConfig( + level=logging.INFO, + format='[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s', +) + +NETWORK_ID = 'admin' + +class Controller(enum.Enum): + TFS_E2E = 'TFS-E2E' + TFS_AGG = 'TFS-AGG' + TFS_IP = 'TFS-IP' + +CONTROLLER_TO_ADDRESS_PORT = { + Controller.TFS_E2E : ('10.254.0.10', 80), + Controller.TFS_AGG : ('10.254.0.11', 80), + Controller.TFS_IP : ('10.254.0.12', 80), +} + +LINK_ID_TO_CONTROLLER = { + 'L1' : Controller.TFS_E2E, + 'L2' : Controller.TFS_E2E, + 'L3' : Controller.TFS_E2E, + 'L4' : Controller.TFS_E2E, + 'L5' : Controller.TFS_IP, + 'L6' : Controller.TFS_IP, + 'L7ab' : Controller.TFS_AGG, + 'L7ba' : Controller.TFS_AGG, + 'L8ab' : Controller.TFS_AGG, + 'L8ba' : Controller.TFS_AGG, + 'L9' : Controller.TFS_IP, + 'L10' : Controller.TFS_IP, + 'L11ab' : Controller.TFS_AGG, + 'L11ba' : Controller.TFS_AGG, + 'L12ab' : Controller.TFS_AGG, + 'L12ba' : Controller.TFS_AGG, + 'L13' : Controller.TFS_AGG, + 'L14' : Controller.TFS_AGG, +} + +TARGET_URL = 'http://{:s}:{:d}/affect_sample_synthesizer' + + +LOGGER = logging.getLogger(__name__) + +def log_request(response): + timestamp = time.strftime('[%Y-%b-%d %H:%M]') + LOGGER.info( + '%s %s %s %s %s', timestamp, request.remote_addr, request.method, + request.full_path, response.status + ) + return response + + +app = Flask(__name__) +app.config['SECRET_KEY'] = secrets.token_hex(64) +app.after_request(log_request) + + +@app.route('/', methods=['GET', 'POST']) +def index(): + if request.method == 'GET': + return render_template('affect_form.html', payload=None, response=None) + + link_id = request.form.get('link_id', '').strip() + bandwidth_factor = request.form.get('bandwidth_factor', '').strip() + latency_factor = request.form.get('latency_factor', '').strip() + + controller = LINK_ID_TO_CONTROLLER.get(link_id) + if controller is None: + MSG = 'link_id({:s}) not allowed. 
Must be one of {:s}' + allowed_link_ids = set(LINK_ID_TO_CONTROLLER.keys()) + flash(MSG.format(str(link_id), str(allowed_link_ids)), category='error') + return render_template('affect_form.html', payload=None, response=None) + + try: + bandwidth_factor = float(bandwidth_factor) + if bandwidth_factor < 0.01 or bandwidth_factor > 100.0: raise ValueError() + except Exception: + MSG = 'bandwidth_factor({:s}) must be a float in range [0.01..100.0]' + flash(MSG.format(str(bandwidth_factor)), category='error') + return render_template('affect_form.html', payload=None, response=None) + + try: + latency_factor = float(latency_factor) + if latency_factor < 0.01 or latency_factor > 100.0: raise ValueError() + except Exception: + MSG = 'latency_factor({:s}) must be a float in range [0.01..100.0]' + flash(MSG.format(str(latency_factor)), category='error') + return render_template('affect_form.html', payload=None, response=None) + + payload = { + 'network_id' : NETWORK_ID, + 'link_id' : link_id, + 'bandwidth_factor': bandwidth_factor, + 'latency_factor' : latency_factor, + } + + controller_address, controller_port = CONTROLLER_TO_ADDRESS_PORT.get(controller) + target_url = TARGET_URL.format(controller_address, controller_port) + + try: + resp = requests.post(target_url, json=payload, timeout=10) + try: + resp_content = resp.json() + except Exception: + resp_content = resp.text + + response = {'status_code': resp.status_code, 'body': resp_content, 'ok': resp.ok} + except Exception as e: + flash('Error sending request: {:s}'.format(str(e)), category='error') + response = None + + str_payload = json.dumps(payload) + return render_template('affect_form.html', payload=str_payload, response=response) + +if __name__ == '__main__': + app.run(host='0.0.0.0', port=8080, debug=True, use_reloader=False) diff --git a/src/tests/tools/traffic_changer/build.sh b/src/tests/tools/traffic_changer/build.sh new file mode 100644 index 000000000..ba7df2433 --- /dev/null +++ b/src/tests/tools/traffic_changer/build.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Make folder containing the script the root folder for its execution +cd $(dirname $0)/../../../../ + +# Build image for Traffic Changer +docker buildx build -t traffic-changer:test -f ./src/tests/tools/traffic_changer/Dockerfile . +#docker tag traffic-changer:test localhost:32000/tfs/traffic-changer:test +#docker push localhost:32000/tfs/traffic-changer:test diff --git a/src/tests/tools/traffic_changer/deploy.sh b/src/tests/tools/traffic_changer/deploy.sh new file mode 100644 index 000000000..8cc4ba3bd --- /dev/null +++ b/src/tests/tools/traffic_changer/deploy.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Cleanup +docker rm --force traffic-changer || true + +# Create Traffic Changer +docker run --detach --name traffic-changer --publish 8080:8080 traffic-changer:test + +sleep 2 + +# Dump containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/traffic_changer/destroy.sh b/src/tests/tools/traffic_changer/destroy.sh new file mode 100644 index 000000000..2f6ac62ab --- /dev/null +++ b/src/tests/tools/traffic_changer/destroy.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Cleanup +docker rm --force traffic-changer || true + +# Dump containers +docker ps -a + +echo "Bye!" diff --git a/src/tests/tools/traffic_changer/requirements.in b/src/tests/tools/traffic_changer/requirements.in new file mode 100644 index 000000000..c11cba15e --- /dev/null +++ b/src/tests/tools/traffic_changer/requirements.in @@ -0,0 +1,20 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +eventlet==0.39.0 +Flask==2.1.3 +gunicorn==23.0.0 +requests==2.27.1 +werkzeug==2.3.7 diff --git a/src/tests/tools/traffic_changer/templates/affect_form.html b/src/tests/tools/traffic_changer/templates/affect_form.html new file mode 100644 index 000000000..cfdb0296d --- /dev/null +++ b/src/tests/tools/traffic_changer/templates/affect_form.html @@ -0,0 +1,57 @@ + + + + + + Affect Sample Synthesizer + + + +

Affect Sample Synthesizer

+ + {% with messages = get_flashed_messages() %} + {% if messages %} +
+ {% for m in messages %} +
{{ m }}
+ {% endfor %} +
+ {% endif %} + {% endwith %} + +
+ + + + + + + + + + +
+ + {% if payload %} +

Payload

+
{{ payload }}
+ {% endif %} + + {% if response %} +

Response

+
Status: {{ response.status_code }} (ok: {{ response.ok }})
+

Body

+
{{ response.body | tojson(indent=2) }}
+ {% endif %} + + + + -- GitLab From a64f07b85fc14b8a11a365046f827c9fd800a3cf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 17:33:12 +0000 Subject: [PATCH 323/367] ECOC F5GA Telemetry Demo: - Updated redeploy script to include traffic-changer GUI --- src/tests/ecoc25-f5ga-telemetry/redeploy.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh index ba942cb01..4bdf8715d 100755 --- a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/redeploy.sh @@ -35,15 +35,21 @@ case "$HOSTNAME" in cd ~/tfs-ctrl/ docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . + echo "Building Traffic Changer..." + cd ~/tfs-ctrl/ + docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . + echo "Cleaning up..." docker rm --force simap-server docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl + docker rm --force traffic-changer echo "Deploying support services..." - docker run --detach --name simap-server --publish 8080:8080 simap-server:mock - docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock - docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock + docker run --detach --name simap-server --publish 8080:8080 simap-server:mock + docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock + docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock sleep 2 docker ps -a -- GitLab From bcec95f56b3761cffe2ba9e3408d6bb959bbd4a8 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 17:33:43 +0000 Subject: [PATCH 324/367] Test - Tools - Mock NCE-FAN Ctrl: - Minor code syuling --- src/tests/tools/mock_nce_fan_ctrl/Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile index b0a3da4e3..cf4dfd9ad 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/Dockerfile +++ b/src/tests/tools/mock_nce_fan_ctrl/Dockerfile @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ FROM python:3.9-slim # Install dependencies -- GitLab From 796efb645a48b647cffe09d992b291f76c6d2f10 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 17:40:28 +0000 Subject: [PATCH 325/367] ECOC F5GA Telemetry Demo: - Separated redeploy into destroy and deploy scripts to prevent race conditions - Cleanup of old scripts --- .../{redeploy.sh => deploy.sh} | 0 src/tests/ecoc25-f5ga-telemetry/destroy.sh | 56 ++++++++++++++ src/tests/ecoc25-f5ga-telemetry/dump-logs.sh | 22 ------ .../ecoc25-f5ga-telemetry/rebuild-tfs.sh | 21 ------ .../subscribe-telemetry-slice1.sh | 73 ------------------- .../subscribe-telemetry-slice2.sh | 28 ------- 6 files changed, 56 insertions(+), 144 deletions(-) rename src/tests/ecoc25-f5ga-telemetry/{redeploy.sh => deploy.sh} (100%) create mode 100755 src/tests/ecoc25-f5ga-telemetry/destroy.sh delete mode 100755 src/tests/ecoc25-f5ga-telemetry/dump-logs.sh delete mode 100755 src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh delete mode 100644 src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh delete mode 100755 src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/redeploy.sh b/src/tests/ecoc25-f5ga-telemetry/deploy.sh similarity index 100% rename from src/tests/ecoc25-f5ga-telemetry/redeploy.sh rename to src/tests/ecoc25-f5ga-telemetry/deploy.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/destroy.sh b/src/tests/ecoc25-f5ga-telemetry/destroy.sh new file mode 100755 index 000000000..47977562d --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/destroy.sh @@ -0,0 +1,56 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Destroying in ${HOSTNAME}..." + + +case "$HOSTNAME" in + simap-server) + echo "Cleaning up..." + docker rm --force simap-server + docker rm --force nce-fan-ctrl + docker rm --force nce-t-ctrl + docker rm --force traffic-changer + + sleep 2 + docker ps -a + ;; + tfs-e2e-ctrl) + echo "Destroying TFS E2E Controller..." + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh + kubectl delete namespace $TFS_K8S_NAMESPACE + ;; + tfs-agg-ctrl) + echo "Destroying TFS Agg Controller..." + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh + kubectl delete namespace $TFS_K8S_NAMESPACE + ;; + tfs-ip-ctrl) + echo "Destroying TFS IP Controller..." + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh + kubectl delete namespace $TFS_K8S_NAMESPACE + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No commands to run." + ;; +esac + +echo "Ready!" 
diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh deleted file mode 100755 index 0c2f101ae..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -./scripts/show_logs_device.sh > device.log -./scripts/show_logs_service.sh > service.log -./scripts/show_logs_slice.sh > slice.log -./scripts/show_logs_pathcomp_frontend.sh > pathcomp.log -./scripts/show_logs_simap_connector.sh > simap.log -./scripts/show_logs_nbi.sh > nbi.log diff --git a/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh b/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh deleted file mode 100755 index f8ad1abce..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/rebuild-tfs.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -export TFS_REGISTRY_IMAGES=${TFS_REGISTRY_IMAGES:-"http://localhost:5000/tfs/"} -export TFS_COMPONENTS=${TFS_COMPONENTS:-"context device pathcomp service slice nbi webui"} -export TFS_IMAGE_TAG=${TFS_IMAGE_TAG:-"f5ga"} - -./deploy/build-only.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh deleted file mode 100644 index 3fa27ef28..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice1.sh +++ /dev/null @@ -1,73 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - - -echo "[E2E] Subscribe Telemetry slice1..." 
-# POST to create subscription and capture response -resp=$(curl -sS --request POST --location --header 'Content-Type: application/json' \ - --data @data/telemetry/subscription-slice1.json \ - http://0.0.0.0:80/restconf/operations/subscriptions:establish-subscription) -echo "$resp" - -# Ensure `jq` is available for JSON parsing -if ! command -v jq >/dev/null 2>&1; then - echo "Error: jq is required but not installed. Install jq and retry." >&2 - exit 1 -fi - -# Extract the subscription URI from the JSON response -# Example response: {"identifier":"4086","uri":"/restconf/data/subscriptions/4086"} -uri=$(echo "$resp" | jq -r '.uri // empty') - -if [ -z "$uri" ]; then - echo "Failed to extract subscription URI from response" >&2 - exit 1 -fi - -# Build full URL (use http for RESTCONF chunked/SSE-style streaming) -full_url="http://0.0.0.0:80${uri}" - -echo "Streaming telemetry from '$full_url' (press Ctrl+C to stop)..." - -# Attempt a long-lived HTTP GET that will dump data as it arrives. -# Many RESTCONF subscription implementations use chunked responses / SSE -# and this curl invocation will keep printing incoming data. -curl -N -sS -H 'Accept: application/yang-data+json' "$full_url" - -# If your server exposes a WebSocket endpoint instead, use a websocket client -# such as `websocat` or `wscat`. Example (requires websocat): -# websocat "ws://0.0.0.0:80${uri}" -# Or using node's wscat: -# npx wscat -c "ws://0.0.0.0:80${uri}" - -# If you need a Python websocket client (requires `websocket-client`): -# python3 - <<'PY' -#from websocket import create_connection -#ws = create_connection('ws://0.0.0.0:80' + '${uri}') -#try: -# while True: -# msg = ws.recv() -# print(msg) -#finally: -# ws.close() -#PY - -echo - -echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh b/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh deleted file mode 100755 index 1bfa62322..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/subscribe-telemetry-slice2.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# Make folder containing the script the root folder for its execution -cd $(dirname $0) - - -echo "[E2E] Subscribe Telemetry slice2..." -curl --request POST --location --header 'Content-Type: application/json' \ - --data @data/telemetry/subscription-slice2.json \ - http://0.0.0.0:80/restconf/operations/subscriptions:establish-subscription -echo - - -echo "Done!" 
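The deleted helpers above illustrate the general subscription workflow: POST the establish-subscription RPC, read the stream URI from the reply, then hold a long-lived GET on that URI to receive the server-sent telemetry events. A minimal Python sketch of the same flow (illustrative only; host, port and payload file are assumptions, while the RPC path and the 'uri' response field follow the script above):

# Illustrative sketch: establish a telemetry subscription and print its SSE stream.
import json
import requests

NBI_URL = 'http://127.0.0.1:80'  # assumption: NBI RESTCONF endpoint

with open('data/telemetry/subscription-slice1.json', 'r', encoding='utf-8') as f:
    payload = json.load(f)

# Establish the subscription; the reply carries the stream URI,
# e.g. {"identifier":"4086","uri":"/restconf/data/subscriptions/4086"}.
resp = requests.post(
    NBI_URL + '/restconf/operations/subscriptions:establish-subscription',
    json=payload, timeout=10,
)
resp.raise_for_status()
stream_uri = resp.json()['uri']

# Long-lived GET on the returned URI; the server streams 'text/event-stream' lines.
with requests.get(NBI_URL + stream_uri, stream=True, timeout=(10, None)) as stream:
    for line in stream.iter_lines(decode_unicode=True):
        if line:
            print(line)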
-- GitLab From 214722a81c225885dfea4e7290fbb0af44b156e7 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 18:01:04 +0000 Subject: [PATCH 326/367] Simap Connector component: - Fixed self-controller identification for proper physical link delegation --- .../service/simap_updater/SimapUpdater.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index afc197268..343b373c7 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -293,7 +293,17 @@ class EventDispatcher(BaseEventDispatcher): te_topo = self._simap_client.network(topology_name) te_topo.update() - allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(topology_name) + topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + topology_names = {t.name for t in topologies} + topology_names.discard(DEFAULT_TOPOLOGY_NAME) + if len(topology_names) != 1: + MSG = 'LinkEvent({:s}) skipped, unable to identify self-controller' + str_link_event = grpc_message_to_json_string(link_event) + LOGGER.warning(MSG.format(str_link_event)) + return False + domain_name = topology_names.pop() # trans-pkt/agg/e2e + + allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(domain_name, set()) if link_name not in allowed_link_names: return False src_device = self._object_cache.get(CachedEntities.DEVICE, endpoint_uuids[0][0], force_update =True ) -- GitLab From 784213f08eb0b84914d61cc6d44a8a81f4acc87b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 18:13:52 +0000 Subject: [PATCH 327/367] NBI component: - Correct path for RPC AffectSampleSynthesizer --- src/nbi/service/sse_telemetry/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/nbi/service/sse_telemetry/__init__.py b/src/nbi/service/sse_telemetry/__init__.py index d9bb07d96..c668cd942 100644 --- a/src/nbi/service/sse_telemetry/__init__.py +++ b/src/nbi/service/sse_telemetry/__init__.py @@ -47,7 +47,7 @@ def register_telemetry_subscription(nbi_app: NbiApplication): ) nbi_app.add_rest_api_resource( AffectSampleSynthesizer, - '/affect_sample_synthesizer', - '/affect_sample_synthesizer/', + '/restconf/operations/affect_sample_synthesizer', + '/restconf/operations/affect_sample_synthesizer/', endpoint='sse.affect_sample_synthesizer', ) -- GitLab From 9dc969b53916a6cc7807385975c3ce4617c2737b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 23 Sep 2025 18:14:11 +0000 Subject: [PATCH 328/367] Test - Tools - Traffic Changer GUI: - Correct path for RPC AffectSampleSynthesizer --- src/tests/tools/traffic_changer/app.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/tools/traffic_changer/app.py b/src/tests/tools/traffic_changer/app.py index 44a180dc1..d3086ed22 100644 --- a/src/tests/tools/traffic_changer/app.py +++ b/src/tests/tools/traffic_changer/app.py @@ -57,7 +57,7 @@ LINK_ID_TO_CONTROLLER = { 'L14' : Controller.TFS_AGG, } -TARGET_URL = 'http://{:s}:{:d}/affect_sample_synthesizer' +TARGET_URL = 'http://{:s}:{:d}/restconf/operations/affect_sample_synthesizer' LOGGER = logging.getLogger(__name__) -- GitLab From fa7269f0f45a0b67cea703425eeb128670003be6 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 25 Sep 2025 10:11:20 +0000 Subject: [PATCH 329/367] ECOC F5GA Telemetry Demo: - Add script to configure VPN port forwarding --- .../config-port-forward-vpn.sh | 24 
+++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100755 src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh b/src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh new file mode 100755 index 000000000..68becba20 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +# Enable routing/NAT on TFS-E2E +echo "net.ipv4.ip_forward=1" | sudo tee -a /etc/sysctl.d/20-ipv4-forward.conf +sudo sysctl -p + +# DNAT from VPN to VM-B +sudo iptables -t nat -A PREROUTING -i tun0 -p tcp --dport 8881 -j DNAT --to-destination 10.254.0.9:8080 +sudo iptables -t nat -A PREROUTING -i tun0 -p tcp --dport 8882 -j DNAT --to-destination 10.254.0.9:8083 + +# MASQUERADE replies from VM-B back to VPN (generic, not by port) +sudo iptables -t nat -A POSTROUTING -o enp0s3 -s 10.0.58.0/24 -j MASQUERADE +sudo iptables -t nat -A POSTROUTING -o enp0s3 -s 10.1.7.0/24 -j MASQUERADE +sudo iptables -t nat -A POSTROUTING -o enp0s3 -s 192.168.0.0/16 -j MASQUERADE + + +# allow new+established from VPN to VM-B’s 8080/8083 +sudo iptables -A FORWARD -i tun0 -o enp0s3 -p tcp -d 10.254.0.9 --dport 8080 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT +sudo iptables -A FORWARD -i tun0 -o enp0s3 -p tcp -d 10.254.0.9 --dport 8083 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT + +# allow return traffic back from LAN to VPN +sudo iptables -A FORWARD -i enp0s3 -o tun0 -m state --state ESTABLISHED,RELATED -j ACCEPT + +echo "Done!" -- GitLab From 59ccaf202640d605176fc8979820bccf8e0e1d8d Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 25 Sep 2025 15:21:41 +0000 Subject: [PATCH 330/367] Device component - NCE Driver: - Use PUT instead of POST to prevent already exists error --- src/device/service/drivers/nce/handlers/AppFlowHandler.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/device/service/drivers/nce/handlers/AppFlowHandler.py b/src/device/service/drivers/nce/handlers/AppFlowHandler.py index 7e3b75982..251916fb9 100644 --- a/src/device/service/drivers/nce/handlers/AppFlowHandler.py +++ b/src/device/service/drivers/nce/handlers/AppFlowHandler.py @@ -50,7 +50,7 @@ class AppFlowHandler: qos_profile_name = qos_profile['name'] LOGGER.info('Creating QoS Profile: {:s}'.format(str(request))) url = self._url_qos_profile_item.format(qos_profile_name) - self._rest_conf_client.post(url, body=request) + self._rest_conf_client.put(url, body=request) applications = ( data @@ -63,7 +63,7 @@ class AppFlowHandler: application_name = application['name'] LOGGER.info('Creating Application: {:s}'.format(str(request))) url = self._url_application_item.format(application_name) - self._rest_conf_client.post(url, body=request) + self._rest_conf_client.put(url, body=request) app_flows = ( data @@ -75,10 +75,10 @@ class AppFlowHandler: app_flow_name = app_flow['name'] LOGGER.info('Creating App Flow: {:s}'.format(str(request))) url = self._url_app_flow_item.format(app_flow_name) - self._rest_conf_client.post(url, body=request) + self._rest_conf_client.put(url, body=request) except requests.exceptions.ConnectionError as e: - MSG = 'Failed to send POST requests to NCE FAN NBI' + MSG = 'Failed to send PUT requests to NCE FAN NBI' raise Exception(MSG) from e -- GitLab From ab49481a793fb19aabeb9cc3e2f5048b352a44ee Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 25 Sep 2025 15:22:25 +0000 Subject: [PATCH 331/367] NBI component - SSE Telemetry: - Reduce log messages --- 
.../sse_telemetry/StreamSubscription.py | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/nbi/service/sse_telemetry/StreamSubscription.py b/src/nbi/service/sse_telemetry/StreamSubscription.py index a92d5b150..83b319e2d 100644 --- a/src/nbi/service/sse_telemetry/StreamSubscription.py +++ b/src/nbi/service/sse_telemetry/StreamSubscription.py @@ -55,7 +55,7 @@ KAFKA_BOOT_SERVERS = KafkaConfig.get_kafka_address() class StreamSubscription(Resource): # @HTTP_AUTH.login_required def get(self, subscription_id : int): - LOGGER.warning('[get] begin') + LOGGER.debug('[get] begin') #db = Engine.get_engine() #if db is None: @@ -71,56 +71,56 @@ class StreamSubscription(Resource): # raise NotFound(description=msg) def event_stream(): - LOGGER.warning('[stream:event_stream] begin') + LOGGER.debug('[stream:event_stream] begin') topic = 'subscription.{:s}'.format(str(subscription_id)) - LOGGER.warning('[stream:event_stream] Checking Topics...') + LOGGER.info('[stream:event_stream] Checking Topics...') kafka_admin = KafkaAdminClient(bootstrap_servers=KAFKA_BOOT_SERVERS) existing_topics = set(kafka_admin.list_topics()) - LOGGER.warning('[stream:event_stream] existing_topics={:s}'.format(str(existing_topics))) + LOGGER.info('[stream:event_stream] existing_topics={:s}'.format(str(existing_topics))) if topic not in existing_topics: - LOGGER.warning('[stream:event_stream] Creating Topic...') + LOGGER.info('[stream:event_stream] Creating Topic...') to_create = [NewTopic(topic, num_partitions=3, replication_factor=1)] try: kafka_admin.create_topics(to_create, validate_only=False) - LOGGER.warning('[stream:event_stream] Topic Created') + LOGGER.info('[stream:event_stream] Topic Created') except TopicAlreadyExistsError: pass - LOGGER.warning('[stream:event_stream] Connecting Consumer...') + LOGGER.info('[stream:event_stream] Connecting Consumer...') kafka_consumer = KafkaConsumer( bootstrap_servers = KAFKA_BOOT_SERVERS, group_id = None, # consumer dispatch all messages sent to subscribed topics auto_offset_reset = 'latest', ) - LOGGER.warning('[stream:event_stream] Subscribing topic={:s}...'.format(str(topic))) + LOGGER.info('[stream:event_stream] Subscribing topic={:s}...'.format(str(topic))) kafka_consumer.subscribe(topics=[topic]) - LOGGER.warning('[stream:event_stream] Subscribed') + LOGGER.info('[stream:event_stream] Subscribed') while True: - LOGGER.warning('[stream:event_stream] Waiting...') + #LOGGER.debug('[stream:event_stream] Waiting...') topic_records : Dict[TopicPartition, List[ConsumerRecord]] = \ kafka_consumer.poll(timeout_ms=1000, max_records=1) if len(topic_records) == 0: time.sleep(0.5) continue # no pending records - LOGGER.warning('[stream:event_stream] topic_records={:s}'.format(str(topic_records))) + #LOGGER.info('[stream:event_stream] topic_records={:s}'.format(str(topic_records))) for _topic, records in topic_records.items(): if _topic.topic != topic: continue for record in records: - message_key = record.key.decode('utf-8') + #message_key = record.key.decode('utf-8') message_value = record.value.decode('utf-8') - MSG = '[stream:event_stream] message_key={:s} message_value={:s}' - LOGGER.warning(MSG.format(str(message_key), str(message_value))) + #MSG = '[stream:event_stream] message_key={:s} message_value={:s}' + #LOGGER.debug(MSG.format(str(message_key), str(message_value))) yield message_value - LOGGER.warning('[stream:event_stream] sent') + #LOGGER.debug('[stream:event_stream] Sent') LOGGER.info('[stream:event_stream] Closing...') 
kafka_consumer.close() - LOGGER.warning('[stream] ready to stream...') + LOGGER.info('[stream] Ready to stream...') return Response(event_stream(), mimetype='text/event-stream') #update_counter = 1 -- GitLab From bd7dacc111ee7c8171896e242fc5903f7e22e0cb Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 25 Sep 2025 15:22:54 +0000 Subject: [PATCH 332/367] Simap Connector component: - Reduced log messages --- .../service/telemetry/worker/AggregatorWorker.py | 2 +- .../service/telemetry/worker/CollectorWorker.py | 4 ++-- .../service/telemetry/worker/SynthesizerWorker.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py index 653382b7f..075c3b6d6 100644 --- a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -78,7 +78,7 @@ class AggregatorWorker(_Worker): try: while not self._stop_event.is_set() and not self._terminate.is_set(): - self._logger.info('[run] Aggregating...') + #self._logger.debug('[run] Aggregating...') link_sample = self._aggregation_cache.aggregate() diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index d9721bc85..27b665d05 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -89,8 +89,8 @@ class CollectorWorker(_Worker): if line is None: continue if len(line) == 0: continue - MSG = '[underlay_subscription_stream] ==> {:s}' - self._logger.info(MSG.format(str(line))) + #MSG = '[underlay_subscription_stream] ==> {:s}' + #self._logger.debug(MSG.format(str(line))) if not line.startswith('data:'): continue data = json.loads(line[5:]) diff --git a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py index 7edc01af7..884a2cff8 100644 --- a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py +++ b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py @@ -45,7 +45,7 @@ class SynthesizerWorker(_Worker): try: while not self._stop_event.is_set() and not self._terminate.is_set(): - self._logger.info('[run] Sampling...') + #self._logger.debug('[run] Sampling...') with self._lock: self._resources.generate_samples(self._simap_client) -- GitLab From a495c490841466fd44be779ff8e323531b23ebb4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 10:10:42 +0000 Subject: [PATCH 333/367] Common - Tools - RestConf - Server: - Extended Callbacks for data and operations - Implemented Operations dispatcher --- .../server/restconf_server/Callbacks.py | 75 +++++++++++++++---- .../server/restconf_server/DispatchData.py | 8 +- .../restconf_server/DispatchOperations.py | 48 ++++++++++++ .../RestConfServerApplication.py | 7 ++ 4 files changed, 119 insertions(+), 19 deletions(-) create mode 100644 src/common/tools/rest_conf/server/restconf_server/DispatchOperations.py diff --git a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py index 16194fd81..e3e4d0f45 100644 --- a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py +++ b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py @@ -38,12 +38,12 @@ class _Callback: ''' return self._path_pattern.fullmatch(path) - def execute( 
+ def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: ''' - Execute the callback action for a matched path. + Execute the callback action for a matched data path. This method should be implemented for each specific callback. @param match: `re.Match` object returned by `match()`. @param path: Original request path that was matched. @@ -55,6 +55,21 @@ class _Callback: msg = MSG.format(match.groupdict(), path, old_data, new_data) raise NotImplementedError(msg) + def execute_operation( + self, match : re.Match, path : str, input_data : Optional[Dict] + ) -> Optional[Dict]: + ''' + Execute the callback action for a matched operation path. + This method should be implemented for each specific callback. + @param match: `re.Match` object returned by `match()`. + @param path: Original request path that was matched. + @param input_data: Input data, if applicable, otherwise `None` + @returns Optional[Dict] containing output data, defaults to None + ''' + MSG = 'match={:s}, path={:s}, input_data={:s}' + msg = MSG.format(match.groupdict(), path, input_data) + raise NotImplementedError(msg) + class CallbackDispatcher: def __init__(self): @@ -63,16 +78,32 @@ class CallbackDispatcher: def register(self, callback : _Callback) -> None: self._callbacks.append(callback) - def dispatch( + def dispatch_data( self, path : str, old_data : Optional[Dict] = None, new_data : Optional[Dict] = None ) -> None: - LOGGER.warning('Checking Callbacks for path={:s}'.format(str(path))) + LOGGER.warning('[dispatch_data] Checking Callbacks for path={:s}'.format(str(path))) for callback in self._callbacks: match = callback.match(path) if match is None: continue - keep_running_callbacks = callback.execute(match, path, old_data, new_data) + keep_running_callbacks = callback.execute_data(match, path, old_data, new_data) if not keep_running_callbacks: break + def dispatch_operation( + self, path : str, input_data : Optional[Dict] = None + ) -> Optional[Dict]: + LOGGER.warning('[dispatch_operation] Checking Callbacks for path={:s}'.format(str(path))) + + # First matching callback is executed, and its output returned. 
+ for callback in self._callbacks: + match = callback.match(path) + if match is None: continue + output_data = callback.execute_operation(match, path, input_data) + return output_data + + # If no callback found, raise NotImplemented exception + MSG = 'Callback for operation ({:s}) not defined' + raise NotImplementedError(MSG.format(str(path))) + # ===== EXAMPLE ========================================================================================== @@ -82,7 +113,7 @@ class CallbackOnNetwork(_Callback): pattern += r'/ietf-network:networks/network=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: @@ -96,7 +127,7 @@ class CallbackOnNode(_Callback): pattern += r'/node=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: @@ -110,25 +141,39 @@ class CallbackOnLink(_Callback): pattern += r'/ietf-network-topology:link=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: print('[on_link]', match.groupdict(), path, old_data, new_data) return False +class CallbackShutdown(_Callback): + def __init__(self) -> None: + pattern = r'/restconf/operations' + pattern += r'/shutdown' + super().__init__(pattern) + + def execute_operation( + self, match : re.Match, path : str, input_data : Optional[Dict] + ) -> bool: + print('[shutdown]', match.groupdict(), path, input_data) + return {'state': 'processing'} + def main() -> None: callbacks = CallbackDispatcher() callbacks.register(CallbackOnNetwork()) callbacks.register(CallbackOnNode()) callbacks.register(CallbackOnLink()) - - callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin') - callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/node=P-PE2') - callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/ietf-network-topology:link=L6') - callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/') - callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/node=P-PE1/') - callbacks.dispatch('/restconf/data/ietf-network:networks/network=admin/ietf-network-topology:link=L4/') + callbacks.register(CallbackShutdown()) + + callbacks.dispatch_data('/restconf/data/ietf-network:networks/network=admin') + callbacks.dispatch_data('/restconf/data/ietf-network:networks/network=admin/node=P-PE2') + callbacks.dispatch_data('/restconf/data/ietf-network:networks/network=admin/ietf-network-topology:link=L6') + callbacks.dispatch_data('/restconf/data/ietf-network:networks/network=admin/') + callbacks.dispatch_data('/restconf/data/ietf-network:networks/network=admin/node=P-PE1/') + callbacks.dispatch_data('/restconf/data/ietf-network:networks/network=admin/ietf-network-topology:link=L4/') + callbacks.dispatch_operation('/restconf/operations/shutdown/') if __name__ == '__main__': main() diff --git a/src/common/tools/rest_conf/server/restconf_server/DispatchData.py b/src/common/tools/rest_conf/server/restconf_server/DispatchData.py index f0e811534..89cb8206e 100644 --- a/src/common/tools/rest_conf/server/restconf_server/DispatchData.py +++ b/src/common/tools/rest_conf/server/restconf_server/DispatchData.py @@ -70,7 +70,7 @@ class RestConfDispatchData(Resource): LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(json_data))) - 
self._callback_dispatcher.dispatch( + self._callback_dispatcher.dispatch_data( '/restconf/data/' + subpath, old_data=None, new_data=json_data ) @@ -102,7 +102,7 @@ class RestConfDispatchData(Resource): diff_data = deepdiff.DeepDiff(old_data, new_data) updated = len(diff_data) > 0 - self._callback_dispatcher.dispatch( + self._callback_dispatcher.dispatch_data( '/restconf/data/' + subpath, old_data=old_data, new_data=new_data ) @@ -140,7 +140,7 @@ class RestConfDispatchData(Resource): #diff_data = deepdiff.DeepDiff(old_data, new_data) #updated = len(diff_data) > 0 - self._callback_dispatcher.dispatch( + self._callback_dispatcher.dispatch_data( '/restconf/data/' + subpath, old_data=old_data, new_data=new_data ) @@ -170,7 +170,7 @@ class RestConfDispatchData(Resource): description='Path({:s}) not found'.format(str(subpath)) ) - self._callback_dispatcher.dispatch( + self._callback_dispatcher.dispatch_data( '/restconf/data/' + subpath, old_data=old_data, new_data=None ) diff --git a/src/common/tools/rest_conf/server/restconf_server/DispatchOperations.py b/src/common/tools/rest_conf/server/restconf_server/DispatchOperations.py new file mode 100644 index 000000000..7e5bdd13a --- /dev/null +++ b/src/common/tools/rest_conf/server/restconf_server/DispatchOperations.py @@ -0,0 +1,48 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+
+import logging
+from flask import Response, abort, jsonify, request
+from flask_restful import Resource
+from .Callbacks import CallbackDispatcher
+from .HttpStatusCodesEnum import HttpStatusCodesEnum
+from .YangHandler import YangHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class RestConfDispatchOperations(Resource):
+    def __init__(
+        self, yang_handler : YangHandler, callback_dispatcher : CallbackDispatcher
+    ) -> None:
+        super().__init__()
+        self._yang_handler = yang_handler
+        self._callback_dispatcher = callback_dispatcher
+
+    def post(self, subpath : str) -> Response:
+        try:
+            payload = request.get_json(force=True)
+        except Exception:
+            LOGGER.exception('Invalid JSON')
+            abort(HttpStatusCodesEnum.CLI_ERR_BAD_REQUEST.value, description='Invalid JSON')
+
+        output_data = self._callback_dispatcher.dispatch_operation(
+            '/restconf/operations/' + subpath, input_data=payload
+        )
+
+        LOGGER.info('[POST] {:s} {:s} => {:s}'.format(subpath, str(payload), str(output_data)))
+
+        response = jsonify(output_data)
+        response.status_code = HttpStatusCodesEnum.SUCCESS_OK.value
+        return response
diff --git a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py
index 677277fe3..58384299c 100644
--- a/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py
+++ b/src/common/tools/rest_conf/server/restconf_server/RestConfServerApplication.py
@@ -20,6 +20,7 @@ from flask_restful import Api, Resource
 from .Callbacks import CallbackDispatcher
 from .Config import RESTCONF_PREFIX, SECRET_KEY, STARTUP_FILE, YANG_SEARCH_PATH
 from .DispatchData import RestConfDispatchData
+from .DispatchOperations import RestConfDispatchOperations
 from .HostMeta import HostMeta
 from .YangHandler import YangHandler
 from .YangModelDiscoverer import YangModuleDiscoverer
@@ -82,6 +83,12 @@ class RestConfServerApplication:
             RESTCONF_PREFIX + '/data//',
             resource_class_args=(self._yang_handler, self._callback_dispatcher)
         )
+        self._api.add_resource(
+            RestConfDispatchOperations,
+            RESTCONF_PREFIX + '/operations/',
+            RESTCONF_PREFIX + '/operations//',
+            resource_class_args=(self._yang_handler, self._callback_dispatcher)
+        )
 
     def register_custom(
         self, resource_class : Type[Resource],
--
GitLab


From 37ef28662f8cd1f0c6dcf6af29d26cc1bd0ba0f1 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Thu, 9 Oct 2025 10:22:24 +0000
Subject: [PATCH 334/367] Code cleanup

---
 deploy/all.sh                         |  6 +-
 manifests/nbiservice.yaml             |  2 +-
 manifests/pathcompservice.yaml        |  2 +-
 manifests/serviceservice.yaml         |  2 +-
 manifests/simap_connectorservice.yaml |  2 +-
 manifests/sliceservice.yaml           |  2 +-
 manifests/webuiservice.yaml           | 96 +++++++++++++--------------
 7 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/deploy/all.sh b/deploy/all.sh
index f02f7bbb0..a284287bc 100755
--- a/deploy/all.sh
+++ b/deploy/all.sh
@@ -230,16 +230,16 @@ export GRAF_EXT_PORT_HTTP=${GRAF_EXT_PORT_HTTP:-"3000"}
 ./deploy/nats.sh
 
 # Deploy QuestDB
-#./deploy/qdb.sh
+./deploy/qdb.sh
 
 # Deploy Apache Kafka
 ./deploy/kafka.sh
 
 #Deploy Monitoring (Prometheus, Mimir, Grafana)
-#./deploy/monitoring.sh
+./deploy/monitoring.sh
 
 # Expose Dashboard
-#./deploy/expose_dashboard.sh
+./deploy/expose_dashboard.sh
 
 # Deploy TeraFlowSDN
 ./deploy/tfs.sh
diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml
index ec6db58b7..cac267495 100644
--- a/manifests/nbiservice.yaml
+++ b/manifests/nbiservice.yaml
@@ -39,7 +39,7 @@ spec:
         #- containerPort: 9192
         env:
         - name:
LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: FLASK_ENV value: "production" # normal value is "production", change to "development" if developing - name: IETF_NETWORK_RENDERER diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 71c7e4cd7..2db0d41b0 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: ENABLE_FORECASTER value: "NO" readinessProbe: diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 8615e8879..8262550ef 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 90d55409c..a061e1f7f 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: SIMAP_SERVER_SCHEME value: "http" - name: SIMAP_SERVER_ADDRESS diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index a05798f0a..1df4797b7 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" - name: SLICE_GROUPING value: "DISABLE" envFrom: diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index 3e1c13422..a241b31eb 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -72,51 +72,51 @@ spec: limits: cpu: 1000m memory: 1024Mi - # - name: grafana - # image: grafana/grafana:8.5.22 - # imagePullPolicy: IfNotPresent - # ports: - # - containerPort: 3000 - # name: http-grafana - # protocol: TCP - # env: - # - name: GF_SERVER_ROOT_URL - # value: "http://0.0.0.0:3000/grafana/" - # - name: GF_SERVER_SERVE_FROM_SUB_PATH - # value: "true" - # readinessProbe: - # failureThreshold: 60 - # httpGet: - # #path: /robots.txt - # path: /login - # port: 3000 - # scheme: HTTP - # initialDelaySeconds: 1 - # periodSeconds: 1 - # successThreshold: 1 - # timeoutSeconds: 2 - # livenessProbe: - # failureThreshold: 60 - # initialDelaySeconds: 1 - # periodSeconds: 1 - # successThreshold: 1 - # tcpSocket: - # port: 3000 - # timeoutSeconds: 1 - # resources: - # requests: - # cpu: 250m - # memory: 512Mi - # limits: - # cpu: 500m - # memory: 1024Mi - # volumeMounts: - # - mountPath: /var/lib/grafana - # name: grafana-pv - #volumes: - # - name: grafana-pv - # persistentVolumeClaim: - # claimName: grafana-pvc + - name: grafana + image: grafana/grafana:8.5.22 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 3000 + name: http-grafana + protocol: TCP + env: + - name: GF_SERVER_ROOT_URL + value: "http://0.0.0.0:3000/grafana/" + - name: GF_SERVER_SERVE_FROM_SUB_PATH + value: "true" + readinessProbe: + failureThreshold: 60 + httpGet: + #path: /robots.txt + path: /login + port: 3000 + scheme: HTTP + initialDelaySeconds: 1 + periodSeconds: 1 + successThreshold: 1 + timeoutSeconds: 2 + livenessProbe: + failureThreshold: 60 + initialDelaySeconds: 1 + periodSeconds: 1 + successThreshold: 1 + tcpSocket: + port: 3000 + timeoutSeconds: 1 + resources: + requests: + cpu: 250m + memory: 512Mi + limits: + cpu: 500m + memory: 1024Mi + volumeMounts: + - mountPath: 
/var/lib/grafana + name: grafana-pv + volumes: + - name: grafana-pv + persistentVolumeClaim: + claimName: grafana-pvc --- apiVersion: v1 kind: Service @@ -132,9 +132,9 @@ spec: - name: webui port: 8004 targetPort: 8004 - #- name: grafana - # port: 3000 - # targetPort: 3000 + - name: grafana + port: 3000 + targetPort: 3000 --- apiVersion: autoscaling/v2 kind: HorizontalPodAutoscaler -- GitLab From 5f6777d970e47cae2d267bd71429ce8cb2fd496c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 10:50:07 +0000 Subject: [PATCH 335/367] NBI component: - Removing unneeded database code - Remove unneeded data files - Remove unneeded requirements - Remove unneeded config settings --- manifests/nbiservice.yaml | 12 - src/nbi/requirements.in | 4 - src/nbi/service/NbiApplication.py | 18 - src/nbi/service/database/Engine.py | 67 - src/nbi/service/database/__init__.py | 14 - src/nbi/service/database/base.py | 25 - .../sse_telemetry/DeleteSubscription.py | 95 +- .../sse_telemetry/EstablishSubscription.py | 164 +- .../Full-Te-Topology-simap1.json | 3485 ----------------- .../Full-Te-Topology-simap2.json | 3485 ----------------- .../sse_telemetry/StreamSubscription.py | 74 +- .../sse_telemetry/database/Subscription.py | 143 - .../sse_telemetry/database/__init__.py | 14 - .../database/models/Subscription.py | 43 - .../sse_telemetry/database/models/__init__.py | 13 - src/nbi/service/sse_telemetry/topology.py | 212 - 16 files changed, 34 insertions(+), 7834 deletions(-) delete mode 100644 src/nbi/service/database/Engine.py delete mode 100644 src/nbi/service/database/__init__.py delete mode 100644 src/nbi/service/database/base.py delete mode 100644 src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json delete mode 100644 src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json delete mode 100644 src/nbi/service/sse_telemetry/database/Subscription.py delete mode 100644 src/nbi/service/sse_telemetry/database/__init__.py delete mode 100644 src/nbi/service/sse_telemetry/database/models/Subscription.py delete mode 100644 src/nbi/service/sse_telemetry/database/models/__init__.py delete mode 100644 src/nbi/service/sse_telemetry/topology.py diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index cac267495..27026cc0f 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -44,18 +44,6 @@ spec: value: "production" # normal value is "production", change to "development" if developing - name: IETF_NETWORK_RENDERER value: "LIBYANG" - - name: NBI_DATABASE - value: "tfs_nbi" - - name: CRDB_NAMESPACE - value: "crdb" - - name: CRDB_SQL_PORT - value: "26257" - - name: CRDB_USERNAME - value: "tfs" - - name: CRDB_PASSWORD - value: "tfs123" - - name: CRDB_SSLMODE - value: "require" envFrom: - secretRef: name: kfk-kpi-data diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in index 72ca62b1e..6c176e3f0 100644 --- a/src/nbi/requirements.in +++ b/src/nbi/requirements.in @@ -35,7 +35,3 @@ requests==2.27.* werkzeug==2.3.7 #websockets==12.0 websocket-client==1.8.0 # used by socketio to upgrate to websocket -psycopg2-binary==2.9.* -SQLAlchemy==1.4.* -sqlalchemy-cockroachdb==1.4.* -SQLAlchemy-Utils==0.38.* diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index 8d9e7a879..ad02c754c 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -20,8 +20,6 @@ from flask_restful import Api, Resource from flask_socketio import Namespace, SocketIO from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from nbi.Config 
import SECRET_KEY -from nbi.service.database.base import rebuild_database -from .database.Engine import Engine LOGGER = logging.getLogger(__name__) @@ -56,22 +54,6 @@ class NbiApplication: logger=True, engineio_logger=True ) - # Initialize the SQLAlchemy database engine - LOGGER.info('Getting SQLAlchemy DB Engine...') - self._db_engine = Engine.get_engine() - if self._db_engine is None: - LOGGER.error('Unable to get SQLAlchemy DB Engine. Exiting...') - raise Exception('Unable to get SQLAlchemy DB Engine') - - # Try creating the database or log any issues - try: - Engine.create_database(self._db_engine) - except Exception as e: # More specific exception handling - LOGGER.exception(f'Failed to check/create the database: {self._db_engine.url}. Error: {str(e)}') - raise e - - rebuild_database(self._db_engine) - def add_rest_api_resource(self, resource_class : Resource, *urls, **kwargs) -> None: self._api.add_resource(resource_class, *urls, **kwargs) diff --git a/src/nbi/service/database/Engine.py b/src/nbi/service/database/Engine.py deleted file mode 100644 index dd6916aed..000000000 --- a/src/nbi/service/database/Engine.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import logging, sqlalchemy, sqlalchemy_utils -from typing import Optional -from common.Settings import get_setting - -LOGGER = logging.getLogger(__name__) - -APP_NAME = 'tfs' -ECHO = False # true: dump SQL commands and transactions executed -CRDB_URI_TEMPLATE = ( - 'cockroachdb://{:s}:{:s}@cockroachdb-public.{:s}.svc.cluster.local:{:s}/{:s}?sslmode={:s}' -) - - -class Engine: - @staticmethod - def get_engine() -> Optional[sqlalchemy.engine.Engine]: - crdb_uri = get_setting('CRDB_URI', default=None) - if crdb_uri is None: - CRDB_NAMESPACE = get_setting('CRDB_NAMESPACE') - CRDB_SQL_PORT = get_setting('CRDB_SQL_PORT') - CRDB_DATABASE = get_setting('NBI_DATABASE') - CRDB_USERNAME = get_setting('CRDB_USERNAME') - CRDB_PASSWORD = get_setting('CRDB_PASSWORD') - CRDB_SSLMODE = get_setting('CRDB_SSLMODE') - crdb_uri = CRDB_URI_TEMPLATE.format( - CRDB_USERNAME, - CRDB_PASSWORD, - CRDB_NAMESPACE, - CRDB_SQL_PORT, - CRDB_DATABASE, - CRDB_SSLMODE, - ) - - try: - engine = sqlalchemy.create_engine( - crdb_uri, connect_args={'application_name': APP_NAME}, echo=ECHO, future=True - ) - except: # pylint: disable=bare-except # pragma: no cover - LOGGER.exception('Failed to connect to database: {:s}'.format(str(crdb_uri))) - return None - - return engine - - @staticmethod - def create_database(engine: sqlalchemy.engine.Engine) -> None: - if not sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.create_database(engine.url) - - @staticmethod - def drop_database(engine: sqlalchemy.engine.Engine) -> None: - if sqlalchemy_utils.database_exists(engine.url): - sqlalchemy_utils.drop_database(engine.url) diff --git a/src/nbi/service/database/__init__.py b/src/nbi/service/database/__init__.py deleted file mode 100644 index 3ccc21c7d..000000000 --- a/src/nbi/service/database/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/nbi/service/database/base.py b/src/nbi/service/database/base.py deleted file mode 100644 index 3cacca994..000000000 --- a/src/nbi/service/database/base.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import sqlalchemy -from sqlalchemy.orm import declarative_base - -_Base = declarative_base() - - -def rebuild_database(db_engine: sqlalchemy.engine.Engine, drop_if_exists: bool = False): - if drop_if_exists: - _Base.metadata.drop_all(db_engine) - _Base.metadata.create_all(db_engine) diff --git a/src/nbi/service/sse_telemetry/DeleteSubscription.py b/src/nbi/service/sse_telemetry/DeleteSubscription.py index 2c1f7be9c..dd60a4589 100644 --- a/src/nbi/service/sse_telemetry/DeleteSubscription.py +++ b/src/nbi/service/sse_telemetry/DeleteSubscription.py @@ -14,118 +14,41 @@ import logging -#from typing import Optional from flask import jsonify, request from flask_restful import Resource -from werkzeug.exceptions import BadRequest, UnsupportedMediaType #, NotFound, InternalServerError +from werkzeug.exceptions import BadRequest, UnsupportedMediaType from common.proto.simap_connector_pb2 import SubscriptionId +from nbi.service._tools.Authentication import HTTP_AUTH from simap_connector.client.SimapConnectorClient import SimapConnectorClient -#from common.proto.monitoring_pb2 import ( -# SSEMonitoringSubscriptionConfig, -# SSEMonitoringSubscriptionResponse, -#) -#from device.client.DeviceClient import DeviceClient -#from context.client.ContextClient import ContextClient -#from nbi.service._tools.Authentication import HTTP_AUTH -#from nbi.service.database.Engine import Engine -#from nbi.service.sse_telemetry.database.Subscription import ( -# get_main_subscription, -# get_sub_subscription, -# delete_subscription, -#) -#from nbi.service.sse_telemetry.topology import ( -# Controllers, -# UnsubscribedNotificationsSchema, -# get_controller_name, -#) LOGGER = logging.getLogger(__name__) class DeleteSubscription(Resource): - # @HTTP_AUTH.login_required + @HTTP_AUTH.login_required def post(self): -# db = Engine.get_engine() -# if db is None: -# LOGGER.error('Database engine is not initialized') -# raise InternalServerError('Database engine is not initialized') - if not request.is_json: -# LOGGER.error('JSON payload is required') raise UnsupportedMediaType('JSON payload is required') request_data = request.json LOGGER.debug('[post] Unsubscription request: {:s}'.format(str(request_data))) -# if request_data is None: -# LOGGER.error('JSON payload is required') -# raise UnsupportedMediaType('JSON payload is required') if 'ietf-subscribed-notifications:input' not in request_data: - raise BadRequest('Missing field(ietf-subscribed-notifications:input)') + path = 'ietf-subscribed-notifications:input' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) input_data = request_data['ietf-subscribed-notifications:input'] subscription_id = SubscriptionId() if 'id' not in input_data: - raise BadRequest('Missing field(ietf-subscribed-notifications:input/id)') + path = 'ietf-subscribed-notifications:input/id' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) subscription_id.subscription_id = input_data['id'] simap_connector_client = SimapConnectorClient() simap_connector_client.DeleteSubscription(subscription_id) -# main_subscription_id = request_data['delete-subscription']['identifier'] -# LOGGER.debug( -# 'Received delete subscription request for ID: {:s}'.format(main_subscription_id) -# ) -# -# # Get the main subscription -# main_subscription = get_main_subscription(db, main_subscription_id) -# if main_subscription is None: -# LOGGER.error('Subscription not found: {:s}'.format(main_subscription_id)) -# raise NotFound('Subscription not found') -# -# # Get all sub-subscriptions 
associated with this main subscription -# sub_subscriptions = get_sub_subscription(db, main_subscription_id) -# -# device_client = DeviceClient() -# context_client = ContextClient() -# -# # Unsubscribe from each sub-subscription -# for sub_sub in sub_subscriptions: -# # Create unsubscribe request -# SERVICE_ID = '' -# device_controller = get_controller_name(sub_sub['xpath'], SERVICE_ID, context_client) -# if device_controller == Controllers.CONTROLLERLESS: -# LOGGER.warning( -# 'Controllerless device detected, skipping subscription for: {:s}'.format( -# sub_sub['xpath'] -# ) -# ) -# continue -# unsub_req = SSEMonitoringSubscriptionConfig() -# unsub_req.device_id.device_uuid.uuid = device_controller.value -# unsub_req.config_type = SSEMonitoringSubscriptionConfig.Unsubscribe -# unsub_req.uri = sub_sub['xpath'] -# unsub_req.identifier = sub_sub['identifier'] -# -# # Send unsubscribe request to device -# device_client.SSETelemetrySubscribe(unsub_req) -# -# delete_subscription(db, sub_sub['identifier'], False) -# -# LOGGER.info('Unsubscribed from {:s} successfully'.format(sub_sub.get('uri', ''))) -# -# # Delete the main subscription from database -# delete_subscription(db, main_subscription_id, True) -# -# LOGGER.info('Successfully deleted main subscription: {:s}'.format(main_subscription_id)) -# -# #if SERVICE_ID == 'simap1': -# # SERVICE_ID = 'simap2' -# #elif SERVICE_ID == 'simap2': -# # SERVICE_ID = 'simap1' -# #else: -# # LOGGER.warning('Unknown service ID, not switching: {:s}'.format(SERVICE_ID)) -# return jsonify({}) diff --git a/src/nbi/service/sse_telemetry/EstablishSubscription.py b/src/nbi/service/sse_telemetry/EstablishSubscription.py index 46882b5bb..2f10fa43e 100644 --- a/src/nbi/service/sse_telemetry/EstablishSubscription.py +++ b/src/nbi/service/sse_telemetry/EstablishSubscription.py @@ -13,52 +13,19 @@ # limitations under the License. 
-import logging #, json -#from random import choice -#from typing import Dict, List, Optional, Set -#from uuid import uuid4 -#from typing_extensions import TypedDict +import logging from flask import jsonify, request, url_for from flask_restful import Resource -from werkzeug.exceptions import BadRequest, UnsupportedMediaType #, NotFound, InternalServerError -from common.proto.simap_connector_pb2 import Subscription #, SubscriptionId +from werkzeug.exceptions import BadRequest, UnsupportedMediaType +from common.proto.simap_connector_pb2 import Subscription +from nbi.service._tools.Authentication import HTTP_AUTH from simap_connector.client.SimapConnectorClient import SimapConnectorClient -#from common.proto.monitoring_pb2 import SSEMonitoringSubscriptionConfig -#from common.tools.context_queries.Device import get_device -#from common.tools.grpc.Tools import grpc_message_to_json_string -#from common.proto.monitoring_pb2 import ( -# SSEMonitoringSubscriptionConfig, -# SSEMonitoringSubscriptionResponse, -#) -#from common.tools.rest_conf.client.RestConfClient import RestConfClient -#from context.client.ContextClient import ContextClient -#from device.client.DeviceClient import DeviceClient -#from nbi.service._tools.Authentication import HTTP_AUTH -#from nbi.service.database.Engine import Engine -#from nbi.service.sse_telemetry.database.Subscription import ( -# SSESubsciprionDict, -# list_identifiers, -# set_subscription, -#) -#from .topology import ( -# Controllers, -# SubscribedNotificationsSchema, -# decompose_subscription, -# get_controller_name, -#) - - - -#class SubscriptionId(TypedDict): -# identifier: str -# uri: str - LOGGER = logging.getLogger(__name__) class EstablishSubscription(Resource): - # @HTTP_AUTH.login_required + @HTTP_AUTH.login_required def post(self): if not request.is_json: raise UnsupportedMediaType('JSON payload is required') @@ -67,25 +34,35 @@ class EstablishSubscription(Resource): LOGGER.debug('[post] Subscription request: {:s}'.format(str(request_data))) if 'ietf-subscribed-notifications:input' not in request_data: - raise BadRequest('Missing field(ietf-subscribed-notifications:input)') + path = 'ietf-subscribed-notifications:input' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) input_data = request_data['ietf-subscribed-notifications:input'] subscription = Subscription() if 'datastore' not in input_data: - raise BadRequest('Missing field(ietf-subscribed-notifications:input/datastore)') + path = 'ietf-subscribed-notifications:input/datastore' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) subscription.datastore = input_data['datastore'] if 'ietf-yang-push:datastore-xpath-filter' not in input_data: - raise BadRequest('Missing field(ietf-subscribed-notifications:input/ietf-yang-push:datastore-xpath-filter)') + path = 'ietf-subscribed-notifications:input/ietf-yang-push:datastore-xpath-filter' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) subscription.xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] if 'ietf-yang-push:periodic' not in input_data: - raise BadRequest('Missing field(ietf-subscribed-notifications:input/ietf-yang-push:periodic)') + path = 'ietf-subscribed-notifications:input/ietf-yang-push:periodic' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) periodic = input_data['ietf-yang-push:periodic'] if 'ietf-yang-push:period' not in periodic: - raise BadRequest('Missing 
field(ietf-subscribed-notifications:input/ietf-yang-push:periodic/ietf-yang-push:period)') + path = 'ietf-subscribed-notifications:input/ietf-yang-push:periodic/ietf-yang-push:period' + MSG = 'Missing field({:s})'.format(str(path)) + raise BadRequest(MSG) subscription.period = float(periodic['ietf-yang-push:period']) simap_connector_client = SimapConnectorClient() @@ -95,104 +72,3 @@ class EstablishSubscription(Resource): subscription_uri = url_for('sse.stream', subscription_id=subscription_id) sub_id = {'id': subscription_id, 'uri': subscription_uri} return jsonify(sub_id) - - -# db = Engine.get_engine() -# if db is None: -# LOGGER.error('Database engine is not initialized') -# raise InternalServerError('Database engine is not initialized') -# rest_conf_client = RestConfClient( -# '10.254.0.9', port=8080, scheme='http', username='admin', password='admin', -# logger=logging.getLogger('RestConfClient') -# ) -# -# # break the request into its abstract components for telemetry subscription -# list_db_ids = list_identifiers(db) -# request_identifier = str( -# choice([x for x in range(1000, 10000) if x not in list_db_ids]) -# ) -# sub_subs = decompose_subscription(rest_conf_client, request_data) -# -# # subscribe to each component -# device_client = DeviceClient() -# context_client = ContextClient() -# for s in sub_subs: -# xpath_filter = s['ietf-subscribed-notifications:input'][ -# 'ietf-yang-push:datastore-xpath-filter' -# ] -# xpath_filter_prefix = xpath_filter.split('/ietf-network-topology:link')[0] -# xpath_network = rest_conf_client.get(xpath_filter_prefix) -# if not xpath_network: -# MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' -# raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_prefix))) -# networks = xpath_network.get('ietf-network:network', list()) -# if len(networks) != 1: -# MSG = 'Resource({:s} => {:s}) wrong number of entries: {:s}' -# raise Exception(MSG.format( -# str(xpath_filter), str(xpath_filter_prefix), str(xpath_network) -# )) -# network = networks[0] -# network_id = network['network-id'] -# -# controller_name_map = { -# 'e2e' : 'TFS-E2E', -# 'agg' : 'TFS-AGG', -# 'trans-pkt': 'TFS-IP', -# 'trans-opt': 'NCE-T', -# 'access' : 'NCE-FAN', -# } -# controller_name = controller_name_map.get(network_id) -# if controller_name is None: -# LOGGER.warning( -# 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath_filter) -# ) -# continue -# -# #SERVICE_ID = '' -# #device_controller = get_controller_name(xpath, SERVICE_ID, context_client) -# #if device_controller == Controllers.CONTROLLERLESS: -# # LOGGER.warning( -# # 'Controllerless device detected, skipping subscription for: {:s}'.format(xpath) -# # ) -# # continue -# -# sampling_interval = s['ietf-subscribed-notifications:input'][ -# 'ietf-yang-push:periodic' -# ]['ietf-yang-push:period'] -# -# s_req = SSEMonitoringSubscriptionConfig() -# #s_req.device_id.device_uuid.uuid = device_controller.value -# s_req.device_id.device_uuid.uuid = controller_name -# s_req.config_type = SSEMonitoringSubscriptionConfig.Subscribe -# s_req.uri = xpath_filter -# s_req.sampling_interval = str(sampling_interval) -# r: SSEMonitoringSubscriptionResponse = device_client.SSETelemetrySubscribe(s_req) -# s = SSESubsciprionDict( -# uuid=str(uuid4()), -# identifier=r.identifier, -# uri=r.uri, -# xpath=xpath_filter, -# sampling_interval=sampling_interval, -# main_subscription=False, -# main_subscription_id=request_identifier, -# ) -# _ = set_subscription(db, s) -# -# # save the main subscription to the 
database -# r_uri = f'/restconf/data/subscriptions/{request_identifier}' -# s = SSESubsciprionDict( -# uuid=str(uuid4()), -# identifier=request_identifier, -# uri=r_uri, -# xpath=request_data['ietf-subscribed-notifications:input'][ -# 'ietf-yang-push:datastore-xpath-filter' -# ], -# sampling_interval=sampling_interval, -# main_subscription=True, -# main_subscription_id=None, -# ) -# _ = set_subscription(db, s) - -# # Return the subscription ID -# sub_id = SubscriptionId(identifier=request_identifier, uri=r_uri) -# return jsonify(sub_id) diff --git a/src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json b/src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json deleted file mode 100644 index 351ec13f7..000000000 --- a/src/nbi/service/sse_telemetry/Full-Te-Topology-simap1.json +++ /dev/null @@ -1,3485 +0,0 @@ -{ - "ietf-network:networks": { - "network": [ - { - "network-id": "providerId-10-clientId-0-topologyId-1", - "ietf-te-topology:te": { - "name": "Huawei-Network" - }, - "ietf-te-topology:te-topology-identifier": { - "provider-id": 10, - "client-id": 0, - "topology-id": "1" - }, - "network-types": { - "ietf-te-topology:te-topology": { - "ietf-otn-topology:otn-topology": {} - } - }, - "node": [ - { - "node-id": "172.16.182.25", - "ietf-te-topology:te-node-id": "172.16.182.25", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OA" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - }, - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - } - ] - } - }, - { - "node-id": "172.16.185.31", - "ietf-te-topology:te-node-id": "172.16.185.31", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - 
"ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - }, - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - } - ] - } - }, - { - "node-id": "172.16.185.33", - "ietf-te-topology:te-node-id": "172.16.185.33", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": 
"ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - }, - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - } - ] - } - }, - { - "node-id": "172.16.185.32", - "ietf-te-topology:te-node-id": "172.16.185.32", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OE" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - }, - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - } - ] - } - } - ], - "ietf-network-topology:link": [ - { - "link-id": "172.16.182.25-501", - "source": { - "source-node": "172.16.182.25", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - 
] - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-500", - "source": { - "source-node": "172.16.182.25", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-501", - "source": { - "source-node": "172.16.185.31", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-500", - "source": { - "source-node": "172.16.185.31", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-500", - "source": { - "source-node": "172.16.185.33", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-501", - "source": { - "source-node": "172.16.185.33", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - 
"ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-500", - "source": { - "source-node": "172.16.185.32", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-501", - "source": { - "source-node": "172.16.185.32", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - } - ] - }, - { - "network-id": "providerId-10-clientId-0-topologyId-2", - "ietf-te-topology:te": { - "name": "Huawei-Network" - }, - "ietf-te-topology:te-topology-identifier": { - "provider-id": 10, - "client-id": 0, - "topology-id": "2" - }, - "network-types": { - "ietf-te-topology:te-topology": { - "ietf-eth-te-topology:eth-tran-topology": {} - } - }, - "node": [ - { - "node-id": "172.1.201.22", - "ietf-te-topology:te-node-id": "172.1.201.22", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "VM2" - } - } - }, - { - "node-id": "172.1.101.22", - "ietf-te-topology:te-node-id": "172.1.101.22", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - 
"ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "VM1" - } - } - }, - { - "node-id": "172.16.204.221", - "ietf-te-topology:te-node-id": "172.16.204.221", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": "172.10.33.1", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - }, - { - "tp-id": "201", - "ietf-te-topology:te-tp-id": 201, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "POP2" - } - } - }, - { - "node-id": "172.16.204.220", - "ietf-te-topology:te-node-id": "172.16.204.220", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": "172.10.33.2", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - 
"port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - }, - { - "tp-id": "201", - "ietf-te-topology:te-tp-id": 201, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "POP1", - "connectivity-matrices": { - "label-restrictions": { - "label-restriction": [ - { - "index": 1, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 101 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 101 - } - } - }, - { - "index": 2, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 201 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 201 - } - } - } - ] - } - } - } - } - }, - { - "node-id": "172.16.122.25", - "ietf-te-topology:te-node-id": "172.16.122.25", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "128.32.44.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - 
"interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "PE" - } - } - }, - { - "node-id": "172.16.125.31", - "ietf-te-topology:te-node-id": "172.16.125.31", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.125.33", - "ietf-te-topology:te-node-id": "172.16.125.33", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - 
"ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.125.32", - "ietf-te-topology:te-node-id": "172.16.125.32", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "172.10.44.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "PE" - } - } - }, - { - "node-id": "172.16.182.25", - "ietf-te-topology:te-node-id": "172.16.182.25", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "128.32.33.254", - 
"ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OA" - } - } - }, - { - "node-id": "172.16.185.31", - "ietf-te-topology:te-node-id": "172.16.185.31", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - 
"supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.185.33", - "ietf-te-topology:te-node-id": "172.16.185.33", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.185.32", - "ietf-te-topology:te-node-id": "172.16.185.32", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "172.10.33.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { 
- "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OE" - } - } - }, - { - "node-id": "172.16.58.10", - "ietf-te-topology:te-node-id": "172.16.58.10", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": "128.32.44.2", - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": "128.32.33.2", - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } 
- } - ] - } - ] - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - }, - { - "tp-id": "201", - "ietf-te-topology:te-tp-id": 201, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OLT", - "connectivity-matrices": { - "label-restrictions": { - "label-restriction": [ - { - "index": 1, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 21 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 21 - } - } - }, - { - "index": 2, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 31 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 31 - } - } - } - ] - } - } - } - } - }, - { - "node-id": "172.16.61.10", - "ietf-te-topology:te-node-id": "172.16.61.10", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "ONT1" - } - } - }, - { - "node-id": "172.16.61.11", - "ietf-te-topology:te-node-id": "172.16.61.11", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - 
"admin-status": "up", - "name": "ONT2" - } - } - } - ], - "ietf-network-topology:link": [ - { - "link-id": "172.16.185.32-200", - "source": { - "source-node": "172.16.185.32", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.204.220", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.32-200", - "source": { - "source-node": "172.16.125.32", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.204.221", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.32-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.220-500", - "source": { - "source-node": "172.16.204.220", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.220-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.221-500", - "source": { - "source-node": "172.16.204.221", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.32", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.221-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.221-200", - "source": { - "source-node": "172.16.204.221", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.1.101.22", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.221-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.220-200", - "source": { - "source-node": "172.16.204.220", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.1.201.22", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.220-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } 
- }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.1.101.22-500", - "source": { - "source-node": "172.1.101.22", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.204.221", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.1.101.22-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.1.201.22-500", - "source": { - "source-node": "172.1.201.22", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.204.220", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.1.201.22-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-200", - "source": { - "source-node": "172.16.182.25", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.122.25-200", - "source": { - "source-node": "172.16.122.25", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.122.25-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-500", - "source": { - "source-node": "172.16.58.10", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-501", - "source": { - "source-node": "172.16.58.10", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.122.25", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-501", - "max-link-bandwidth": { - "te-bandwidth": { - 
"ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-501", - "source": { - "source-node": "172.16.182.25", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-500", - "source": { - "source-node": "172.16.182.25", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-501", - "source": { - "source-node": "172.16.185.31", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-500", - "source": { - "source-node": "172.16.185.31", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-501", - "source": { - "source-node": "172.16.185.33", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-500", - "source": { - "source-node": "172.16.185.33", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-500", - 
"max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-501", - "source": { - "source-node": "172.16.185.32", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-500", - "source": { - "source-node": "172.16.185.32", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.122.25-501", - "source": { - "source-node": "172.16.122.25", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.125.31", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.122.25-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.122.25-500", - "source": { - "source-node": "172.16.122.25", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.33", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.122.25-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.31-501", - "source": { - "source-node": "172.16.125.31", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.122.25", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.31-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.31-500", - "source": { - "source-node": "172.16.125.31", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.32", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - 
"admin-status": "up", - "name": "172.16.125.31-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.33-501", - "source": { - "source-node": "172.16.125.33", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.125.32", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.33-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.33-500", - "source": { - "source-node": "172.16.125.33", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.122.25", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.33-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.32-501", - "source": { - "source-node": "172.16.125.32", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.125.33", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.32-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.32-500", - "source": { - "source-node": "172.16.125.32", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.31", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.32-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-200", - "source": { - "source-node": "172.16.58.10", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.61.10", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.61.10-500", - "source": { - "source-node": "172.16.61.10", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": 
{ - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.61.10-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-201", - "source": { - "source-node": "172.16.58.10", - "source-tp": "201" - }, - "destination": { - "dest-node": "172.16.61.11", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-201", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.61.11-500", - "source": { - "source-node": "172.16.61.11", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "201" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.61.11-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - } - ] - }, - { - "network-id": "e2e-slice-simap-1", - "supporting-network": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2" - }, - { - "network-ref": "aggnet-simap-1" - } - ], - "node": [ - { - "node-id": "node-1", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.61.10" - } - ] - }, - { - "node-id": "node-2", - "supporting-node": [ - { - "network-ref": "aggnet-simap-1", - "node-ref": "node-2" - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "link-1", - "source": { - "source-node": "node-1", - "source-tp": "tp-1" - }, - "destination": { - "dest-node": "node-2", - "dest-tp": "tp-2" - }, - "simap-telemetry": { - "bandwidth-utilization": 76.51, - "latency": 2.321, - "related-service-ids": ["e2e-network-slice-1"] - }, - "ietf-network-topology:supporting-link": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.61.10-500" - }, - { - "network-ref": "aggnet-simap-1", - "link-ref": "link-1" - } - ] - } - ] - }, - { - "network-id": "aggnet-simap-1", - "supporting-network": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2" - }, - { - "network-ref": "trans-simap-1" - } - ], - "node": [ - { - "node-id": "node-1", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.58.10" - } - ] - }, - { - "node-id": "node-2", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.204.221" - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "link-1", - "source": { - "source-node": "node-1", - "source-tp": "tp-1" - }, - "destination": { - "dest-node": "node-2", - "dest-tp": "tp-2" - }, - "simap-telemetry": { - "bandwidth-utilization": 76.51, - "latency": 2.321, - "related-service-ids": ["trans-network-slice-1"] - }, - "ietf-network-topology:supporting-link": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.58.10-501" - }, - { - 
"network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.204.221-500" - }, - { - "network-ref": "trans-simap-1", - "link-ref": "link-1" - } - ] - } - ] - }, - { - "network-id": "trans-simap-1", - "supporting-network": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2" - } - ], - "node": [ - { - "node-id": "node-1", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.122.25" - } - ] - }, - { - "node-id": "node-2", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.125.32" - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "link-1", - "source": { - "source-node": "node-1", - "source-tp": "tp-1" - }, - "destination": { - "dest-node": "node-2", - "dest-tp": "tp-2" - }, - "simap-telemetry": { - "bandwidth-utilization": 76.51, - "latency": 2.321, - "related-service-ids": ["l3sm-instance-1"] - }, - "ietf-network-topology:supporting-link": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.122.25-500" - }, - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.125.33-501" - } - ] - } - ] - } - ] - } -} diff --git a/src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json b/src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json deleted file mode 100644 index f9019c481..000000000 --- a/src/nbi/service/sse_telemetry/Full-Te-Topology-simap2.json +++ /dev/null @@ -1,3485 +0,0 @@ -{ - "ietf-network:networks": { - "network": [ - { - "network-id": "providerId-10-clientId-0-topologyId-1", - "ietf-te-topology:te": { - "name": "Huawei-Network" - }, - "ietf-te-topology:te-topology-identifier": { - "provider-id": 10, - "client-id": 0, - "topology-id": "1" - }, - "network-types": { - "ietf-te-topology:te-topology": { - "ietf-otn-topology:otn-topology": {} - } - }, - "node": [ - { - "node-id": "172.16.182.25", - "ietf-te-topology:te-node-id": "172.16.182.25", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OA" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - 
"local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - }, - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - } - ] - } - }, - { - "node-id": "172.16.185.31", - "ietf-te-topology:te-node-id": "172.16.185.31", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - }, - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - } - ] - } - }, - { - "node-id": "172.16.185.33", - "ietf-te-topology:te-node-id": "172.16.185.33", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - 
"ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - }, - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - } - ] - } - }, - { - "node-id": "172.16.185.32", - "ietf-te-topology:te-node-id": "172.16.185.32", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-te-topology:te": { - "name": "1-1-1-1-1", - "admin-status": "up", - "oper-status": "up", - "ietf-otn-topology:client-svc": { - "client-facing": false - }, - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-oduk", - "switching-capability": "ietf-te-types:switching-otn", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:otn": { - "odu-type": "ietf-layer1-types:ODU4" - } - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OE" - }, - "tunnel-termination-point": [ - { - "tunnel-tp-id": "NTAw", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "500" - } - ] - } - }, - { - "tunnel-tp-id": "NTAx", - "admin-status": "up", - "oper-status": "up", - "encoding": "ietf-te-types:lsp-encoding-oduk", - "name": "1-1-1-1-1", - "protection-type": "ietf-te-types:lsp-protection-unprotected", - "switching-capability": "ietf-te-types:switching-otn", - "local-link-connectivities": { - "local-link-connectivity": [ - { - "is-allowed": true, - "link-tp-ref": "501" - } - ] - } - } - ] - } - } - ], - 
"ietf-network-topology:link": [ - { - "link-id": "172.16.182.25-501", - "source": { - "source-node": "172.16.182.25", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-500", - "source": { - "source-node": "172.16.182.25", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-501", - "source": { - "source-node": "172.16.185.31", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-500", - "source": { - "source-node": "172.16.185.31", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-500", - "source": { - "source-node": "172.16.185.33", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - 
"ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-501", - "source": { - "source-node": "172.16.185.33", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-500", - "source": { - "source-node": "172.16.185.32", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-500", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-501", - "source": { - "source-node": "172.16.185.32", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-501", - "te-delay-metric": 1, - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "odu-type": "ietf-layer1-types:ODU0", - "number": 80 - } - ] - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-otn-topology:odulist": [ - { - "number": 80, - "odu-type": "ietf-layer1-types:ODU0" - } - ] - } - } - ] - } - } - } - ] - }, - { - "network-id": "providerId-10-clientId-0-topologyId-2", - "ietf-te-topology:te": { - "name": "Huawei-Network" - }, - "ietf-te-topology:te-topology-identifier": { - "provider-id": 10, - "client-id": 0, - "topology-id": "2" - }, - "network-types": { - "ietf-te-topology:te-topology": { - "ietf-eth-te-topology:eth-tran-topology": {} - } - }, - "node": [ - { - "node-id": "172.1.201.22", - "ietf-te-topology:te-node-id": "172.1.201.22", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - 
"ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "VM2" - } - } - }, - { - "node-id": "172.1.101.22", - "ietf-te-topology:te-node-id": "172.1.101.22", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "VM1" - } - } - }, - { - "node-id": "172.16.204.221", - "ietf-te-topology:te-node-id": "172.16.204.221", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": "172.10.33.1", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - }, - { - "tp-id": "201", - "ietf-te-topology:te-tp-id": 201, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "POP2" - } - } - }, - { - "node-id": "172.16.204.220", - "ietf-te-topology:te-node-id": "172.16.204.220", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": "172.10.33.2", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - 
"supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - }, - { - "tp-id": "201", - "ietf-te-topology:te-tp-id": 201, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "POP1", - "connectivity-matrices": { - "label-restrictions": { - "label-restriction": [ - { - "index": 1, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 101 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 101 - } - } - }, - { - "index": 2, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 201 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 201 - } - } - } - ] - } - } - } - } - }, - { - "node-id": "172.16.122.25", - "ietf-te-topology:te-node-id": "172.16.122.25", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "128.32.44.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - 
"switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "PE" - } - } - }, - { - "node-id": "172.16.125.31", - "ietf-te-topology:te-node-id": "172.16.125.31", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.125.33", - "ietf-te-topology:te-node-id": "172.16.125.33", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - 
"ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.125.32", - "ietf-te-topology:te-node-id": "172.16.125.32", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "172.10.44.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - 
"admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "PE" - } - } - }, - { - "node-id": "172.16.182.25", - "ietf-te-topology:te-node-id": "172.16.182.25", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": "128.32.33.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OA" - } - } - }, - { - "node-id": "172.16.185.31", - "ietf-te-topology:te-node-id": "172.16.185.31", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - 
"ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.185.33", - "ietf-te-topology:te-node-id": "172.16.185.33", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "P" - } - } - }, - { - "node-id": "172.16.185.32", - "ietf-te-topology:te-node-id": "172.16.185.32", - "ietf-network-topology:termination-point": [ - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 
"172.10.33.254", - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": 501, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OE" - } - } - }, - { - "node-id": "172.16.58.10", - "ietf-te-topology:te-node-id": "172.16.58.10", - "ietf-network-topology:termination-point": [ - { - "tp-id": "501", - "ietf-te-topology:te-tp-id": "128.32.44.2", - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": "128.32.33.2", - "ietf-eth-te-topology:eth-svc": { - 
"client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - }, - "ietf-te-topology:te": { - "name": "endpoint:111", - "admin-status": "up", - "oper-status": "up", - "interface-switching-capability": [ - { - "encoding": "ietf-te-types:lsp-encoding-ethernet", - "switching-capability": "ietf-te-types:switching-l2sc", - "max-lsp-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - ] - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - }, - { - "tp-id": "201", - "ietf-te-topology:te-tp-id": 201, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "OLT", - "connectivity-matrices": { - "label-restrictions": { - "label-restriction": [ - { - "index": 1, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 21 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 21 - } - } - }, - { - "index": 2, - "label-start": { - "te-label": { - "ietf-eth-te-topology:vlanid": 31 - } - }, - "label-end": { - "te-label": { - "ietf-eth-te-topology:vlanid": 31 - } - } - } - ] - } - } - } - } - }, - { - "node-id": "172.16.61.10", - "ietf-te-topology:te-node-id": "172.16.61.10", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "ONT1" - } - } - }, - { - "node-id": "172.16.61.11", - "ietf-te-topology:te-node-id": "172.16.61.11", - "ietf-network-topology:termination-point": [ - { - "tp-id": "500", - "ietf-te-topology:te-tp-id": 500, - "ietf-eth-te-topology:eth-svc": { - "client-facing": false, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - 
"outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4094" - } - } - } - } - }, - { - "tp-id": "200", - "ietf-te-topology:te-tp-id": 200, - "ietf-eth-te-topology:eth-svc": { - "client-facing": true, - "supported-classification": { - "port-classification": true, - "vlan-classification": { - "outer-tag": { - "supported-tag-types": [ - "ietf-eth-tran-types:classify-c-vlan", - "ietf-eth-tran-types:classify-s-vlan" - ], - "vlan-bundling": false, - "vlan-range": "1-4096" - } - } - } - } - } - ], - "ietf-te-topology:te": { - "oper-status": "up", - "te-node-attributes": { - "admin-status": "up", - "name": "ONT2" - } - } - } - ], - "ietf-network-topology:link": [ - { - "link-id": "172.16.185.32-200", - "source": { - "source-node": "172.16.185.32", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.204.220", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.32-200", - "source": { - "source-node": "172.16.125.32", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.204.221", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.32-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.220-500", - "source": { - "source-node": "172.16.204.220", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.220-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.221-500", - "source": { - "source-node": "172.16.204.221", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.32", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.221-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.221-200", - "source": { - "source-node": "172.16.204.221", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.1.101.22", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.221-200", - 
"max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.204.220-200", - "source": { - "source-node": "172.16.204.220", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.1.201.22", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.204.220-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.1.101.22-500", - "source": { - "source-node": "172.1.101.22", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.204.221", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.1.101.22-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.1.201.22-500", - "source": { - "source-node": "172.1.201.22", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.204.220", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.1.201.22-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-200", - "source": { - "source-node": "172.16.182.25", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.122.25-200", - "source": { - "source-node": "172.16.122.25", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.122.25-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-500", - "source": { - "source-node": "172.16.58.10", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": 
"up", - "name": "172.16.58.10-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-501", - "source": { - "source-node": "172.16.58.10", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.122.25", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-501", - "source": { - "source-node": "172.16.182.25", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.182.25-500", - "source": { - "source-node": "172.16.182.25", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.182.25-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-501", - "source": { - "source-node": "172.16.185.31", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.31-500", - "source": { - "source-node": "172.16.185.31", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.31-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-501", - "source": { - "source-node": "172.16.185.33", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.32", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - 
"access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.33-500", - "source": { - "source-node": "172.16.185.33", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.182.25", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.33-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-501", - "source": { - "source-node": "172.16.185.32", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.185.33", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.185.32-500", - "source": { - "source-node": "172.16.185.32", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.185.31", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.185.32-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.122.25-501", - "source": { - "source-node": "172.16.122.25", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.125.31", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.122.25-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.122.25-500", - "source": { - "source-node": "172.16.122.25", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.33", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.122.25-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.31-501", - "source": { - "source-node": "172.16.125.31", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.122.25", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - 
"oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.31-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.31-500", - "source": { - "source-node": "172.16.125.31", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.32", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.31-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.33-501", - "source": { - "source-node": "172.16.125.33", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.125.32", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.33-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.33-500", - "source": { - "source-node": "172.16.125.33", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.122.25", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.33-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.32-501", - "source": { - "source-node": "172.16.125.32", - "source-tp": "501" - }, - "destination": { - "dest-node": "172.16.125.33", - "dest-tp": "501" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.32-501", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.125.32-500", - "source": { - "source-node": "172.16.125.32", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.125.31", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.125.32-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-200", - "source": { - "source-node": "172.16.58.10", - "source-tp": "200" - }, - "destination": { - "dest-node": "172.16.61.10", - 
"dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-200", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.61.10-500", - "source": { - "source-node": "172.16.61.10", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "200" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.61.10-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.58.10-201", - "source": { - "source-node": "172.16.58.10", - "source-tp": "201" - }, - "destination": { - "dest-node": "172.16.61.11", - "dest-tp": "500" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.58.10-201", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - }, - { - "link-id": "172.16.61.11-500", - "source": { - "source-node": "172.16.61.11", - "source-tp": "500" - }, - "destination": { - "dest-node": "172.16.58.10", - "dest-tp": "201" - }, - "ietf-te-topology:te": { - "oper-status": "up", - "te-link-attributes": { - "access-type": "point-to-point", - "admin-status": "up", - "name": "172.16.61.11-500", - "max-link-bandwidth": { - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - }, - "unreserved-bandwidth": [ - { - "priority": 7, - "te-bandwidth": { - "ietf-eth-te-topology:eth-bandwidth": 10000000 - } - } - ] - } - } - } - ] - }, - { - "network-id": "e2e-slice-simap-2", - "supporting-network": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2" - }, - { - "network-ref": "aggnet-simap-2" - } - ], - "node": [ - { - "node-id": "node-1", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.61.10" - } - ] - }, - { - "node-id": "node-2", - "supporting-node": [ - { - "network-ref": "aggnet-simap-2", - "node-ref": "node-2" - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "link-1", - "source": { - "source-node": "node-1", - "source-tp": "tp-1" - }, - "destination": { - "dest-node": "node-2", - "dest-tp": "tp-2" - }, - "simap-telemetry": { - "bandwidth-utilization": 76.51, - "latency": 2.321, - "related-service-ids": ["e2e-network-slice-1"] - }, - "ietf-network-topology:supporting-link": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.61.10-500" - }, - { - "network-ref": "aggnet-simap-2", - "link-ref": "link-1" - } - ] - } - ] - }, - { - "network-id": "aggnet-simap-2", - "supporting-network": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2" - }, - { - "network-ref": "trans-simap-2" - } - ], - "node": [ - { - "node-id": "node-1", - "supporting-node": [ - { - "network-ref": 
"providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.58.10" - } - ] - }, - { - "node-id": "node-2", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.204.220" - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "link-1", - "source": { - "source-node": "node-1", - "source-tp": "tp-1" - }, - "destination": { - "dest-node": "node-2", - "dest-tp": "tp-2" - }, - "simap-telemetry": { - "bandwidth-utilization": 76.51, - "latency": 2.321, - "related-service-ids": ["trans-network-slice-1"] - }, - "ietf-network-topology:supporting-link": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.58.10-500" - }, - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.204.220-500" - }, - { - "network-ref": "trans-simap-1", - "link-ref": "link-1" - } - ] - } - ] - }, - { - "network-id": "trans-simap-2", - "supporting-network": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2" - } - ], - "node": [ - { - "node-id": "node-1", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.182.25" - } - ] - }, - { - "node-id": "node-2", - "supporting-node": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "node-ref": "172.16.185.32" - } - ] - } - ], - "ietf-network-topology:link": [ - { - "link-id": "link-1", - "source": { - "source-node": "node-1", - "source-tp": "tp-1" - }, - "destination": { - "dest-node": "node-2", - "dest-tp": "tp-2" - }, - "simap-telemetry": { - "bandwidth-utilization": 76.51, - "latency": 2.321, - "related-service-ids": ["l3sm-instance-1"] - }, - "ietf-network-topology:supporting-link": [ - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.182.25-500" - }, - { - "network-ref": "providerId-10-clientId-0-topologyId-2", - "link-ref": "172.16.185.33-501" - } - ] - } - ] - } - ] - } -} \ No newline at end of file diff --git a/src/nbi/service/sse_telemetry/StreamSubscription.py b/src/nbi/service/sse_telemetry/StreamSubscription.py index 83b319e2d..02b4dd0c9 100644 --- a/src/nbi/service/sse_telemetry/StreamSubscription.py +++ b/src/nbi/service/sse_telemetry/StreamSubscription.py @@ -13,37 +13,16 @@ # limitations under the License. 
-import logging, time #, asyncio, json, uuid -#from dataclasses import dataclass -#from datetime import datetime -from typing import Dict, List #, Optional, Union -from flask import Response #, jsonify, request +import logging, time +from typing import Dict, List +from flask import Response from flask_restful import Resource -#from flask_sse import sse from kafka import KafkaConsumer, TopicPartition from kafka.admin import KafkaAdminClient, NewTopic from kafka.consumer.fetcher import ConsumerRecord from kafka.errors import TopicAlreadyExistsError -#from werkzeug.exceptions import NotFound, InternalServerError, UnsupportedMediaType -#from common.proto.monitoring_pb2 import ( -# SSEMonitoringSubscriptionConfig, -# SSEMonitoringSubscriptionResponse, -#) from common.tools.kafka.Variables import KafkaConfig -#from device.client.DeviceClient import DeviceClient -#from context.client.ContextClient import ContextClient -#from nbi.service._tools.Authentication import HTTP_AUTH -#from nbi.service.database.Engine import Engine -#from nbi.service.sse_telemetry.database.Subscription import ( -# get_main_subscription, -# get_sub_subscription, -# delete_subscription, -#) -#from nbi.service.sse_telemetry.topology import ( -# Controllers, -# UnsubscribedNotificationsSchema, -# get_controller_name, -#) +from nbi.service._tools.Authentication import HTTP_AUTH LOGGER = logging.getLogger(__name__) @@ -53,23 +32,10 @@ KAFKA_BOOT_SERVERS = KafkaConfig.get_kafka_address() class StreamSubscription(Resource): - # @HTTP_AUTH.login_required + @HTTP_AUTH.login_required def get(self, subscription_id : int): LOGGER.debug('[get] begin') - #db = Engine.get_engine() - #if db is None: - # LOGGER.error('Database engine is not initialized') - # raise InternalServerError('Database engine is not initialized') - - ## Get the main subscription - #main_subscription = get_main_subscription(db, subscription_id) - #if main_subscription is None: - # MSG = 'Subscription({:s}) not found' - # msg = MSG.format(str(subscription_id)) - # LOGGER.error(msg) - # raise NotFound(description=msg) - def event_stream(): LOGGER.debug('[stream:event_stream] begin') topic = 'subscription.{:s}'.format(str(subscription_id)) @@ -122,33 +88,3 @@ class StreamSubscription(Resource): LOGGER.info('[stream] Ready to stream...') return Response(event_stream(), mimetype='text/event-stream') - - #update_counter = 1 - #sampling_interval = float(main_subscription['sampling_interval']) - - #try: - # while True: - # simap_telemetry = { - # 'bandwidth-utilization': str(round(bandwidth, 2)), - # 'latency': str(round(delay, 2)), - # 'related-service-ids': [service_name], - # } - # telemetry_data = {'ietf-restconf:notification': { - # 'eventTime': datetime.utcnow().isoformat() + 'Z', - # 'push-update': { - # 'id': update_counter, - # 'datastore-contents': { - # 'simap-telemetry:simap-telemetry': simap_telemetry - # } - # } - # }} - # sse.publish(telemetry_data, id=update_counter, channel=str(subscription_id)) - # update_counter += 1 - # await asyncio.sleep(sampling_interval) - # - # # Send termination event - # sse.publish({}, id=update_counter, channel=str(subscription_id)) - # - #except Exception: - # MSG = 'Unhandled Exception event generator for Subscription({:s})' - # LOGGER.exception(MSG.format(str(subscription_id))) diff --git a/src/nbi/service/sse_telemetry/database/Subscription.py b/src/nbi/service/sse_telemetry/database/Subscription.py deleted file mode 100644 index f417f8598..000000000 --- a/src/nbi/service/sse_telemetry/database/Subscription.py +++ /dev/null 
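
Editor's sketch (not part of the patch): the cleaned-up StreamSubscription resource above reads Kafka records from a per-subscription topic ("subscription.<id>") and returns them as a Server-Sent Events response guarded by HTTP_AUTH. A minimal client sketch follows; the NBI base URL, route and credentials are assumptions and deployment-specific, not taken from this patch.

# Hypothetical SSE client for the StreamSubscription resource shown above.
# NBI_URL, the route layout and the credentials are assumptions; adjust to the deployment.
import requests

NBI_URL = 'http://127.0.0.1:8080/restconf/streams'   # assumed base path of the NBI stream resource
SUBSCRIPTION_ID = 1                                   # assumed subscription identifier

response = requests.get(
    '{:s}/{:d}'.format(NBI_URL, SUBSCRIPTION_ID),
    auth=('admin', 'admin'),                          # placeholder credentials for HTTP_AUTH
    stream=True,
)
response.raise_for_status()

for line in response.iter_lines(decode_unicode=True):
    # SSE frames carry the payload in 'data:' lines; blank lines separate events.
    if line and line.startswith('data:'):
        print(line[len('data:'):].strip())

The loop simply prints each telemetry event as it arrives; a real consumer would parse the JSON payload and stop once the stream is closed by the server.
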
@@ -1,143 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import logging -from sqlalchemy.dialects.postgresql import insert -from sqlalchemy.engine import Engine -from sqlalchemy.orm import Session, sessionmaker -from sqlalchemy_cockroachdb import run_transaction -from typing import Any, List, Optional, Set, TypedDict - -from .models.Subscription import SSESubscriptionModel - -LOGGER = logging.getLogger(__name__) - - -class SSESubsciprionDict(TypedDict): - uuid: str - identifier: str - uri: str - xpath: str - sampling_interval : float - main_subscription: bool - main_subscription_id: Optional[str] - - -def set_subscription(db_engine: Engine, request: SSESubsciprionDict) -> None: - def callback(session: Session) -> bool: - stmt = insert(SSESubscriptionModel).values([request]) - stmt = stmt.on_conflict_do_update( - index_elements=[SSESubscriptionModel.uuid], - set_=dict( - uuid=stmt.excluded.uuid, - identifier=stmt.excluded.identifier, - uri=stmt.excluded.uri, - xpath=stmt.excluded.xpath, - sampling_interval=stmt.excluded.sampling_interval, - main_subscription=stmt.excluded.main_subscription, - main_subscription_id=stmt.excluded.main_subscription_id, - ), - ) - stmt = stmt.returning(SSESubscriptionModel) - subs = session.execute(stmt).fetchall() - return subs[0] - - _ = run_transaction(sessionmaker(bind=db_engine), callback) - - -def delete_subscription(db_engine: Engine, request: str, main_subscription: bool) -> None: - def callback(session: Session) -> bool: - num_deleted = ( - session.query(SSESubscriptionModel) - .filter_by(identifier=request, main_subscription=main_subscription) - .delete() - ) - return num_deleted > 0 - - _ = run_transaction(sessionmaker(bind=db_engine), callback) - - -def get_main_subscription(db_engine: Engine, subscription_id: str) -> Optional[SSESubsciprionDict]: - def callback(session: Session) -> Optional[SSESubsciprionDict]: - obj: Optional[SSESubscriptionModel] = ( - session.query(SSESubscriptionModel) - .filter_by(identifier=subscription_id, main_subscription=True) - .one_or_none() - ) - return ( - None - if obj is None - else SSESubsciprionDict( - uuid=obj.uuid, - identifier=obj.identifier, - uri=obj.uri, - xpath=obj.xpath, - sampling_interval=obj.sampling_interval, - main_subscription=obj.main_subscription, - main_subscription_id=obj.main_subscription_id, - ) - ) - - return run_transaction(sessionmaker(bind=db_engine), callback) - - -def get_sub_subscription(db_engine: Engine, subscription_id: str) -> List[SSESubsciprionDict]: - def callback(session: Session) -> List[SSESubsciprionDict]: - obj: List[SSESubscriptionModel] = ( - session.query(SSESubscriptionModel) - .filter_by(main_subscription_id=subscription_id, main_subscription=False) - .all() - ) - return [ - SSESubsciprionDict( - uuid=o.uuid, - identifier=o.identifier, - uri=o.uri, - xpath=o.xpath, - sampling_interval=obj.sampling_interval, - main_subscription=o.main_subscription, - 
main_subscription_id=o.main_subscription_id, - ) - for o in obj - ] - - return run_transaction(sessionmaker(bind=db_engine), callback) - - -def get_subscriptions(db_engine: Engine) -> List[SSESubsciprionDict]: - def callback(session: Session) -> List[SSESubsciprionDict]: - obj_list: List[SSESubscriptionModel] = session.query(SSESubscriptionModel).all() - return [ - SSESubsciprionDict( - uuid=obj.uuid, - identifier=obj.identifier, - uri=obj.uri, - xpath=obj.xpath, - sampling_interval=obj.sampling_interval, - main_subscription=obj.main_subscription, - main_subscription_id=obj.main_subscription_id, - ) - for obj in obj_list - ] - - return run_transaction(sessionmaker(bind=db_engine), callback) - - -def list_identifiers(db_engine: Engine) -> Set[str]: - def callback(session: Session) -> set[str]: - obj_list: List[SSESubscriptionModel] = session.query(SSESubscriptionModel).all() - return {obj.identifier for obj in obj_list} - - return run_transaction(sessionmaker(bind=db_engine), callback) diff --git a/src/nbi/service/sse_telemetry/database/__init__.py b/src/nbi/service/sse_telemetry/database/__init__.py deleted file mode 100644 index 3ccc21c7d..000000000 --- a/src/nbi/service/sse_telemetry/database/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/nbi/service/sse_telemetry/database/models/Subscription.py b/src/nbi/service/sse_telemetry/database/models/Subscription.py deleted file mode 100644 index f8f6dcd17..000000000 --- a/src/nbi/service/sse_telemetry/database/models/Subscription.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import sqlalchemy -from sqlalchemy import Column, Float, Integer, String, JSON, Boolean -from sqlalchemy.dialects.postgresql import UUID - - -# from sqlalchemy.orm import declarative_base - -from nbi.service.database.base import _Base - -# _Base = declarative_base() - - -class SSESubscriptionModel(_Base): - __tablename__ = 'sse_subscription' - - uuid = Column(String, primary_key=True) - identifier = Column(String, nullable=False, unique=False) - uri = Column(String, nullable=False, unique=False) - xpath = Column(String, nullable=False, unique=False) - sampling_interval = Column(Float, nullable=True) - main_subscription = Column(Boolean, default=False) - main_subscription_id = Column(String, nullable=True) - - -# def rebuild_database(db_engine: sqlalchemy.engine.Engine, drop_if_exists: bool = False): -# if drop_if_exists: -# _Base.metadata.drop_all(db_engine) -# _Base.metadata.create_all(db_engine) diff --git a/src/nbi/service/sse_telemetry/database/models/__init__.py b/src/nbi/service/sse_telemetry/database/models/__init__.py deleted file mode 100644 index 7363515f0..000000000 --- a/src/nbi/service/sse_telemetry/database/models/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. diff --git a/src/nbi/service/sse_telemetry/topology.py b/src/nbi/service/sse_telemetry/topology.py deleted file mode 100644 index 174471886..000000000 --- a/src/nbi/service/sse_telemetry/topology.py +++ /dev/null @@ -1,212 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import json, logging, os, re -from enum import Enum -from typing_extensions import List, TypedDict -from common.proto.context_pb2 import Device, DeviceId, Empty -from common.tools.object_factory.Device import json_device_id -from common.tools.rest_conf.client.RestConfClient import RestConfClient -from common.DeviceTypes import DeviceTypeEnum -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient - -Periodic = TypedDict('Periodic', {'ietf-yang-push:period': str}) - -Input = TypedDict( - 'Input', - { - 'datastore': str, - 'ietf-yang-push:datastore-xpath-filter': str, - 'ietf-yang-push:periodic': Periodic, - }, -) - -SubscribedNotificationsSchema = TypedDict( - 'SubscribedNotificationsSchema', {'ietf-subscribed-notifications:input': Input} -) - -UnsubInput = TypedDict('UnsubInput', {'identifier': str}) - -UnsubscribedNotificationsSchema = TypedDict( - 'UnsubscribedNotificationsSchema', {'delete-subscription': UnsubInput} -) - - -class Controllers(str, Enum): - AGG_NET_CONTROLLER = 'agg-net-controller' - IP_TRANSPORT_CONTROLLER = 'ip-transport-controller' - NCE_CONTROLLER = 'nce-controller' - CONTROLLERLESS = 'controllerless' - - -match_network = re.compile(r'\/network=([^\/]*)') - -te_link = re.compile(r'\/ietf-network-topology:link=([\d.]+)-') - -phy_network = re.compile(r'providerId-\d+-clientId-\d+-topologyId-\d+') - -LOGGER = logging.getLogger(__name__) - -#dir_path = os.path.dirname(__file__) - -#with open(os.path.join(dir_path, 'Full-Te-Topology-simap1.json'), 'r') as f: -# NETWORK_DATA_SIMAP1 = json.load(f) - -#with open(os.path.join(dir_path, 'Full-Te-Topology-simap2.json'), 'r') as f: -# NETWORK_DATA_SIMAP2 = json.load(f) - - -#def get_network_data(service_id: str) -> dict: -# if service_id == 'simap1': -# return NETWORK_DATA_SIMAP1 -# elif service_id == 'simap2': -# return NETWORK_DATA_SIMAP2 -# else: -# raise ValueError(f'Unsupported service_id: {service_id}. Expected "simap1" or "simap2".') - - -def decompose_subscription( - rest_conf_client : RestConfClient, - s : SubscribedNotificationsSchema -) -> List[SubscribedNotificationsSchema]: - """ - Decomposes a subscription into its components by finding supporting links - in the network hierarchy. 
- """ - input_data = s['ietf-subscribed-notifications:input'] - xpath_filter = input_data['ietf-yang-push:datastore-xpath-filter'] - - xpath_filter_2 = xpath_filter.replace('/simap-telemetry', '') - xpath_data = rest_conf_client.get(xpath_filter_2) - if not xpath_data: - MSG = 'Resource({:s} => {:s}) not found in SIMAP Server' - raise Exception(MSG.format(str(xpath_filter), str(xpath_filter_2))) - -# # Parse the XPath to extract network and link information -# # Format: /ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry -# parts = xpath_filter.split('/') -# network_part = None -# link_part = None - -# for part in parts: -# if part.startswith('network='): -# network_part = part[8:] # Remove 'network=' prefix -# elif part.startswith('ietf-network-topology:link='): -# link_part = part[27:] # Remove 'ietf-network-topology:link=' prefix - -# if not network_part or not link_part: -# raise ValueError('Invalid XPath filter format') - -# # Find the network in the topology data -# networks = get_network_data(service_id)['ietf-network:networks']['network'] -# target_network = None - -# for network in networks: -# if network['network-id'] == network_part: -# target_network = network -# break - -# if not target_network: -# raise ValueError(f'Network {network_part} not found in topology data') - -# # Find the link in the network -# links = target_network.get('ietf-network-topology:link', []) -# target_link = None - -# for link in links: -# if link['link-id'] == link_part: -# target_link = link -# break - -# if not target_link: -# raise ValueError(f'Link {link_part} not found in network {network_part}') - - # Get supporting links - #supporting_links = target_link.get('ietf-network-topology:supporting-link', []) - - links = xpath_data.get('ietf-network-topology:link', list()) - if len(links) == 0: raise Exception('Link({:s}) not found'.format(str(xpath_filter_2))) - if len(links) > 1: raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter_2))) - link = links[0] - supporting_links = link.get('supporting-link', list()) - if len(supporting_links) == 0: - #raise ValueError( - # f'No supporting links found for link {link_part} in network {network_part}' - #) - MSG = 'No supporting links found for Resource({:s}, {:s})' - raise Exception(MSG.format(str(xpath_filter), str(xpath_data))) - - # Create decomposed subscriptions - decomposed = [] - - for supporting_link in supporting_links: - network_ref = supporting_link['network-ref'] - link_ref = supporting_link['link-ref'] - - # Create new XPath filter for the supporting link - new_xpath = f'/ietf-network:networks/network={network_ref}/ietf-network-topology:link={link_ref}/simap-telemetry' - - # Create new subscription - new_subscription = { - 'ietf-subscribed-notifications:input': { - 'datastore': input_data['datastore'], - 'ietf-yang-push:datastore-xpath-filter': new_xpath, - 'ietf-yang-push:periodic': input_data['ietf-yang-push:periodic'], - } - } - - decomposed.append(new_subscription) - - return decomposed - - -def get_controller_name(xpath: str, service_id: str, context_client: ContextClient) -> Controllers: - m = match_network.search(xpath) - network = m.groups()[0] - if 'simap' in network: - if 'aggnet' in network: - return Controllers.AGG_NET_CONTROLLER - elif 'trans' in network: - nodes = [] - for n in get_network_data(service_id)['ietf-network:networks']['network']: - nodes.extend( - [nn['node-ref'] for node in n['node'] for nn in node['supporting-node']] - ) - devices = context_client.ListDevices(Empty()) 
- transport_nodes = [ - d.name - for d in devices - if d.controller_id.device_uuid.uuid == 'ip-transport-controller' - ] - if all(node in transport_nodes for node in nodes): - return Controllers.IP_TRANSPORT_CONTROLLER - else: - raise ValueError(f'Unsupported transport network in XPath: {xpath}') - else: - raise ValueError(f'Unsupported network type in XPath: {xpath}') - elif phy_network.search(network): - LOGGER.info(f'Phy network detected in XPath: {xpath}') - m = te_link.search(xpath) - node = m.groups()[0] - device = context_client.GetDevice(DeviceId(**json_device_id(node))) - controller_uuid = device.controller_id.device_uuid.uuid - ctrl = context_client.GetDevice(DeviceId(**json_device_id(controller_uuid))) - if ctrl.name == 'nce-controller': - return Controllers.NCE_CONTROLLER - else: - return Controllers.CONTROLLERLESS - else: - raise ValueError(f'Unsupported XPath: {xpath}') -- GitLab From 7620b068e63204a592553b6f2681958d24bc23cf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 11:22:48 +0000 Subject: [PATCH 336/367] Code cleanup --- manifests/simap_connectorservice.yaml | 3 +- manifests/webuiservice.yaml | 22 +- src/simap_connector/requirements.in | 4 +- .../ecoc25-camara-e2e-telemetry/Dockerfile | 86 - .../data/camara-e2e-topology.json | 1725 ----------------- .../deploy_specs.sh | 208 -- .../mocks/Dockerfile | 30 - .../mocks/app/main.py | 299 --- .../mocks/docker-compose.yml | 58 - .../mocks/requirements.txt | 5 - .../requirements.in | 30 - .../tests/Fixtures.py | 43 - .../tests/Tools.py | 109 -- .../tests/__init__.py | 14 - .../tests/test_e2e_ietf_slice_operations.py | 478 ----- .../tests/test_onboarding.py | 67 - 16 files changed, 14 insertions(+), 3167 deletions(-) delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/Dockerfile delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json delete mode 100755 src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/requirements.in delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py delete mode 100644 src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index a061e1f7f..09796f6f8 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -43,8 +43,7 @@ spec: # Assuming SIMAP Server is deployed in a local Docker container, as per: # - ./src/tests/tools/simap_server/build.sh # - ./src/tests/tools/simap_server/deploy.sh - #value: "172.17.0.1" - value: "10.254.0.9" + value: "172.17.0.1" - name: SIMAP_SERVER_PORT # Assuming SIMAP Server is deployed in a local Docker container, as per: # - ./src/tests/tools/simap_server/build.sh diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index a241b31eb..0a6213e99 100644 --- a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -12,17 +12,17 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -#apiVersion: v1 -#kind: PersistentVolumeClaim -#metadata: -# name: grafana-pvc -#spec: -# accessModes: -# - ReadWriteOnce -# resources: -# requests: -# storage: 1Gi -#--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: grafana-pvc +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi +--- apiVersion: apps/v1 kind: Deployment metadata: diff --git a/src/simap_connector/requirements.in b/src/simap_connector/requirements.in index 180c1f034..167b3d994 100644 --- a/src/simap_connector/requirements.in +++ b/src/simap_connector/requirements.in @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -requests==2.27.* kafka-python==2.0.* psycopg2-binary==2.9.* -SQLAlchemy==1.4.* +requests==2.27.* sqlalchemy-cockroachdb==1.4.* SQLAlchemy-Utils==0.38.* +SQLAlchemy==1.4.* diff --git a/src/tests/ecoc25-camara-e2e-telemetry/Dockerfile b/src/tests/ecoc25-camara-e2e-telemetry/Dockerfile deleted file mode 100644 index cdd1b16d1..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/Dockerfile +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -FROM python:3.9-slim - -# Install dependencies -RUN apt-get --yes --quiet --quiet update && \ - apt-get --yes --quiet --quiet install wget g++ git && \ - rm -rf /var/lib/apt/lists/* - -# Set Python to show logs as they occur -ENV PYTHONUNBUFFERED=0 - -# Get generic Python packages -RUN python3 -m pip install --upgrade pip -RUN python3 -m pip install --upgrade setuptools wheel -RUN python3 -m pip install --upgrade pip-tools - -# Get common Python packages -# Note: this step enables sharing the previous Docker build steps among all the Python components -WORKDIR /var/teraflow -COPY common_requirements.in common_requirements.in -RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in -RUN python3 -m pip install -r common_requirements.txt - -# Add common files into working directory -WORKDIR /var/teraflow/common -COPY src/common/. ./ -RUN rm -rf proto - -# Create proto sub-folder, copy .proto files, and generate Python code -RUN mkdir -p /var/teraflow/common/proto -WORKDIR /var/teraflow/common/proto -RUN touch __init__.py -COPY proto/*.proto ./ -RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto -RUN rm *.proto -RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . 
\1_pb2/g' {} \; - -# Create component sub-folders, get specific Python packages -RUN mkdir -p /var/teraflow/tests/ofc25-camara-e2e-controller -WORKDIR /var/teraflow/tests/ofc25-camara-e2e-controller -COPY src/tests/ofc25-camara-e2e-controller/requirements.in requirements.in -RUN pip-compile --quiet --output-file=requirements.txt requirements.in -RUN python3 -m pip install -r requirements.txt - -# Add component files into working directory -WORKDIR /var/teraflow -COPY src/__init__.py ./__init__.py -COPY src/common/*.py ./common/ -COPY src/common/tests/. ./common/tests/ -COPY src/common/tools/. ./common/tools/ -COPY src/context/__init__.py context/__init__.py -COPY src/context/client/. context/client/ -COPY src/device/__init__.py device/__init__.py -COPY src/device/client/. device/client/ -COPY src/monitoring/__init__.py monitoring/__init__.py -COPY src/monitoring/client/. monitoring/client/ -COPY src/service/__init__.py service/__init__.py -COPY src/service/client/. service/client/ -COPY src/slice/__init__.py slice/__init__.py -COPY src/slice/client/. slice/client/ -COPY src/vnt_manager/__init__.py vnt_manager/__init__.py -COPY src/vnt_manager/client/. vnt_manager/client/ -COPY src/tests/*.py ./tests/ -COPY src/tests/ofc25-camara-e2e-controller/__init__.py ./tests/ofc25-camara-e2e-controller/__init__.py -COPY src/tests/ofc25-camara-e2e-controller/data/. ./tests/ofc25-camara-e2e-controller/data/ -COPY src/tests/ofc25-camara-e2e-controller/tests/. ./tests/ofc25-camara-e2e-controller/tests/ -COPY src/tests/ofc25-camara-e2e-controller/scripts/. ./ - -RUN apt-get --yes --quiet --quiet update && \ - apt-get --yes --quiet --quiet install tree && \ - rm -rf /var/lib/apt/lists/* - -RUN tree -la /var/teraflow diff --git a/src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json b/src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json deleted file mode 100644 index b2a8617e2..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/data/camara-e2e-topology.json +++ /dev/null @@ -1,1725 +0,0 @@ -{ - "contexts": [ - { - "context_id": { - "context_uuid": { - "uuid": "admin" - } - } - } - ], - "topologies": [ - { - "topology_id": { - "context_id": { - "context_uuid": { - "uuid": "admin" - } - }, - "topology_uuid": { - "uuid": "admin" - } - } - } - ], - "devices": [ - { - "device_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "name": "ip-transport-controller", - "device_type": "ietf-slice", - "device_operational_status": 1, - "device_drivers": [ - 14 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "AGG_NET_IP" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "AGG_NET_PORT" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - } - ], - "scheme": "http", - "username": "admin", - "password": "admin", - "base_url": "/restconf/v2/data", - "timeout": 120, - "verify": false - } - } - } - ] - }, - "device_endpoints": [] - }, - { - "device_id": { - "device_uuid": { - "uuid": "agg-net-controller" - } - }, - "name": "agg-net-controller", - "device_type": "ietf-slice", - "device_operational_status": 1, - "device_drivers": [ - 14 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "AGG_NET_IP" - } - }, - { - "action": 1, - "custom": 
{ - "resource_key": "_connect/port", - "resource_value": "AGG_NET_PORT" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - } - ], - "scheme": "http", - "username": "admin", - "password": "admin", - "base_url": "/restconf/v2/data", - "timeout": 120, - "verify": false - } - } - } - ] - }, - "device_endpoints": [] - }, - { - "device_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "name": "nce-controller", - "device_type": "nce", - "device_operational_status": 1, - "device_drivers": [ - 15 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "NCE_IP" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "NCE_PORT" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - } - ], - "scheme": "http", - "username": "admin", - "password": "admin", - "base_url": "/restconf/v2/data", - "timeout": 120, - "verify": false - } - } - } - ] - }, - "device_endpoints": [] - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "name": "172.16.182.25", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 0, - 14 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "200", - "name": "200", - "type": "optical", - "address_ip": "128.32.33.254", - "address_prefix": "24", - "site_location": "access", - "mtu": "1500" - }, - { - "uuid": "500", - "name": "500", - "type": "optical" - }, - { - "uuid": "501", - "name": "501", - "type": "optical" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.31" - } - }, - "name": "172.16.185.31", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 0, - 14 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "500", - "name": "500", - "type": "optical" - }, - { - "uuid": "501", - "name": "501", - "type": "optical" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.33" - } - }, - "name": "172.16.185.33", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 0, - 14 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { 
- "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "500", - "name": "500", - "type": "optical" - }, - { - "uuid": "501", - "name": "501", - "type": "optical" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "name": "172.16.185.32", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 0, - 14 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "200", - "name": "200", - "type": "optical", - "ce-ip": "172.10.33.2", - "address_ip": "172.10.33.254", - "address_prefix": "24", - "site_location": "cloud", - "mtu": "1500" - }, - { - "uuid": "500", - "name": "500", - "type": "optical" - }, - { - "uuid": "501", - "name": "501", - "type": "optical" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "name": "172.16.58.10", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 15 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "200", - "name": "200", - "type": "optical", - "address_ip": "0.0.0.0", - "address_prefix": "24" - }, - { - "uuid": "201", - "name": "201", - "type": "optical", - "address_ip": "0.0.0.0", - "address_prefix": "24" - }, - { - "uuid": "500", - "name": "500", - "type": "optical", - "address_ip": "128.32.33.2", - "address_prefix": "24" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.10" - } - }, - "name": "172.16.61.10", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 15 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "200", - "name": "200", - "type": "optical", - "address_ip": "0.0.0.0", - "address_prefix": "24" - }, - { - "uuid": "500", - "name": "500", - "type": "optical", - 
"address_ip": "128.32.33.2", - "address_prefix": "24" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.11" - } - }, - "name": "172.16.61.11", - "device_type": "emu-packet-router", - "controller_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "device_operational_status": 1, - "device_drivers": [ - 15 - ], - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "mgmt", - "name": "mgmt", - "type": "mgmt" - }, - { - "uuid": "200", - "name": "200", - "type": "optical", - "address_ip": "0.0.0.0", - "address_prefix": "24" - }, - { - "uuid": "500", - "name": "500", - "type": "optical", - "address_ip": "128.32.33.2", - "address_prefix": "24" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.104.221" - } - }, - "device_type": "emu-datacenter", - "device_drivers": [ - 0 - ], - "device_endpoints": [], - "device_operational_status": 1, - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "sample_types": [], - "type": "copper", - "uuid": "eth0" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.104.222" - } - }, - "device_type": "emu-datacenter", - "device_drivers": [ - 0 - ], - "device_endpoints": [], - "device_operational_status": 1, - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "sample_types": [], - "type": "copper", - "uuid": "eth0" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.204.220" - } - }, - "device_type": "emu-datacenter", - "device_drivers": [ - 0 - ], - "device_endpoints": [], - "device_operational_status": 1, - "device_config": { - "config_rules": [ - { - "action": 1, - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": 1, - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "sample_types": [], - "type": "optical", - "uuid": "500" - }, - { - "sample_types": [], - "type": "optical", - "uuid": "200" - }, - { - "sample_types": [], - "type": "optical", - "uuid": "201" - } - ] - } - } - } - ] - } - } - ], - "links": [ - { - "link_id": { - "link_uuid": { - "uuid": "agg-net-controller/mgmt==ip-transport-controller/mgmt" - } - }, - "name": "agg-net-controller/mgmt==ip-transport-controller/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "agg-net-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - 
"device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "nce-controller/mgmt==172.16.61.11/mgmt" - } - }, - "name": "nce-controller/mgmt==172.16.61.11/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.11" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "nce-controller/mgmt==172.16.61.10/mgmt" - } - }, - "name": "nce-controller/mgmt==172.16.61.10/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.10" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "nce-controller/mgmt==172.16.58.10/mgmt" - } - }, - "name": "nce-controller/mgmt==172.16.58.10/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "nce-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "ip-transport-controller/mgmt==172.16.185.33/mgmt" - } - }, - "name": "ip-transport-controller/mgmt==172.16.185.33/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.33" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "ip-transport-controller/mgmt==172.16.185.31/mgmt" - } - }, - "name": "ip-transport-controller/mgmt==172.16.185.31/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.31" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "ip-transport-controller/mgmt==172.16.185.32/mgmt" - } - }, - "name": "ip-transport-controller/mgmt==172.16.185.32/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "ip-transport-controller/mgmt==172.16.182.25/mgmt" - } - }, - "name": "ip-transport-controller/mgmt==172.16.182.25/mgmt", - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "ip-transport-controller" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "mgmt" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.182.25-500" - } - }, - "name": "172.16.182.25-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - 
"device_id": { - "device_uuid": { - "uuid": "172.16.185.33" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.33-500" - } - }, - "name": "172.16.185.33-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.33" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.182.25-501" - } - }, - "name": "172.16.182.25-501", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.31" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.31-501" - } - }, - "name": "172.16.185.31-501", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.31" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.31-500" - } - }, - "name": "172.16.185.31-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.31" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.32-500" - } - }, - "name": "172.16.185.32-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.31" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.33-501" - } - }, - "name": "172.16.185.33-501", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.33" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.32-501" - } - }, - "name": "172.16.185.32-501", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.33" - } - }, - "endpoint_uuid": { - "uuid": "501" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.185.32-200" - } - }, - "name": "172.16.185.32-200", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - 
"device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.204.220" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.204.220-500" - } - }, - "name": "172.16.204.220-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.204.220" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.185.32" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.182.25-200" - } - }, - "name": "172.16.182.25-200", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.58.10-500" - } - }, - "name": "172.16.58.10-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.182.25" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.58.10-200" - } - }, - "name": "172.16.58.10-200", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.10" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.61.10-500" - } - }, - "name": "172.16.61.10-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.10" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.58.10-201" - } - }, - "name": "172.16.58.10-201", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "201" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.11" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.61.11-500" - } - }, - "name": "172.16.61.11-500", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.11" - } - }, - "endpoint_uuid": { - "uuid": "500" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.58.10" - } - }, - "endpoint_uuid": { - "uuid": "201" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.61.10-200" - } - }, - "name": "172.16.61.10-200", - 
"attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.10" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.104.221" - } - }, - "endpoint_uuid": { - "uuid": "eth0" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.104.221-eth0" - } - }, - "name": "172.16.104.221-eth0", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.104.221" - } - }, - "endpoint_uuid": { - "uuid": "eth0" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.10" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.61.11-200" - } - }, - "name": "172.16.61.11-200", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.11" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.104.222" - } - }, - "endpoint_uuid": { - "uuid": "eth0" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "172.16.104.222-eth0" - } - }, - "name": "172.16.104.222-eth0", - "attributes": { - "total_capacity_gbps": 10, - "used_capacity_gbps": 0 - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "172.16.104.222" - } - }, - "endpoint_uuid": { - "uuid": "eth0" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "172.16.61.11" - } - }, - "endpoint_uuid": { - "uuid": "200" - } - } - ] - } - ] -} diff --git a/src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh b/src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh deleted file mode 100755 index fc61779a3..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/deploy_specs.sh +++ /dev/null @@ -1,208 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# ----- TeraFlowSDN ------------------------------------------------------------ - -# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. -export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" - -# Set the list of components, separated by spaces, you want to build images for, and deploy. 
-#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator" -export TFS_COMPONENTS="context device pathcomp service slice nbi webui" - -# Uncomment to activate Monitoring (old) -#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" - -# Uncomment to activate Monitoring Framework (new) -#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" - -# Uncomment to activate QoS Profiles -#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" - -# Uncomment to activate BGP-LS Speaker -#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" - -# Uncomment to activate Optical Controller -# To manage optical connections, "service" requires "opticalcontroller" to be deployed -# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the -# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. -#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then -# BEFORE="${TFS_COMPONENTS% service*}" -# AFTER="${TFS_COMPONENTS#* service}" -# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" -#fi - -# Uncomment to activate ZTP -#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" - -# Uncomment to activate Policy Manager -#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" - -# Uncomment to activate Optical CyberSecurity -#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" - -# Uncomment to activate L3 CyberSecurity -#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" - -# Uncomment to activate TE -#export TFS_COMPONENTS="${TFS_COMPONENTS} te" - -# Uncomment to activate Forecaster -#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" - -# Uncomment to activate E2E Orchestrator -#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" - -# Uncomment to activate DLT and Interdomain -#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" -#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then -# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" -# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" -# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" -#fi - -# Uncomment to activate QKD App -# To manage QKD Apps, "service" requires "qkd_app" to be deployed -# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the -# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. -#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then -# BEFORE="${TFS_COMPONENTS% service*}" -# AFTER="${TFS_COMPONENTS#* service}" -# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" -#fi - - -# Set the tag you want to use for your images. -export TFS_IMAGE_TAG="dev" - -# Set the name of the Kubernetes namespace to deploy TFS to. -export TFS_K8S_NAMESPACE="tfs" - -# Set additional manifest files to be applied after the deployment -export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" - -# Uncomment to monitor performance of components -#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" - -# Uncomment when deploying Optical CyberSecurity -#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" - -# Set the new Grafana admin password -export TFS_GRAFANA_PASSWORD="admin123+" - -# Disable skip-build flag to rebuild the Docker images. 
-export TFS_SKIP_BUILD="" - - -# ----- CockroachDB ------------------------------------------------------------ - -# Set the namespace where CockroackDB will be deployed. -export CRDB_NAMESPACE="crdb" - -# Set the external port CockroackDB Postgre SQL interface will be exposed to. -export CRDB_EXT_PORT_SQL="26257" - -# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. -export CRDB_EXT_PORT_HTTP="8081" - -# Set the database username to be used by Context. -export CRDB_USERNAME="tfs" - -# Set the database user's password to be used by Context. -export CRDB_PASSWORD="tfs123" - -# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. -# See ./deploy/all.sh or ./deploy/crdb.sh for additional details -export CRDB_DEPLOY_MODE="single" - -# Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="YES" - -# Disable flag for re-deploying CockroachDB from scratch. -export CRDB_REDEPLOY="" - - -# ----- NATS ------------------------------------------------------------------- - -# Set the namespace where NATS will be deployed. -export NATS_NAMESPACE="nats" - -# Set the external port NATS Client interface will be exposed to. -export NATS_EXT_PORT_CLIENT="4222" - -# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. -export NATS_EXT_PORT_HTTP="8222" - -# Set NATS installation mode to 'single'. This option is convenient for development and testing. -# See ./deploy/all.sh or ./deploy/nats.sh for additional details -export NATS_DEPLOY_MODE="single" - -# Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="" - - -# ----- QuestDB ---------------------------------------------------------------- - -# Set the namespace where QuestDB will be deployed. -export QDB_NAMESPACE="qdb" - -# Set the external port QuestDB Postgre SQL interface will be exposed to. -export QDB_EXT_PORT_SQL="8812" - -# Set the external port QuestDB Influx Line Protocol interface will be exposed to. -export QDB_EXT_PORT_ILP="9009" - -# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. -export QDB_EXT_PORT_HTTP="9000" - -# Set the database username to be used for QuestDB. -export QDB_USERNAME="admin" - -# Set the database user's password to be used for QuestDB. -export QDB_PASSWORD="quest" - -# Set the table name to be used by Monitoring for KPIs. -export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" - -# Set the table name to be used by Slice for plotting groups. -export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" - -# Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="YES" - -# Disable flag for re-deploying QuestDB from scratch. -export QDB_REDEPLOY="" - - -# ----- K8s Observability ------------------------------------------------------ - -# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. -export PROM_EXT_PORT_HTTP="9090" - -# Set the external port Grafana HTTP Dashboards will be exposed to. -export GRAF_EXT_PORT_HTTP="3000" - - -# ----- Apache Kafka ----------------------------------------------------------- - -# Set the namespace where Apache Kafka will be deployed. -export KFK_NAMESPACE="kafka" - -# Set the port Apache Kafka server will be exposed to. 
-export KFK_SERVER_PORT="9092" - -# Set the flag to YES for redeploying of Apache Kafka -export KFK_REDEPLOY="" diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile b/src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile deleted file mode 100644 index cf4797c4a..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/mocks/Dockerfile +++ /dev/null @@ -1,30 +0,0 @@ -FROM python:3.11-slim - -WORKDIR /app - -# Install system dependencies -RUN apt-get update && apt-get install -y \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Copy requirements first for better caching -COPY requirements.txt . -RUN pip install --no-cache-dir -r requirements.txt - -# Copy application code -COPY app/ . - -# Create non-root user -RUN adduser --disabled-password --gecos '' appuser -RUN chown -R appuser:appuser /app -USER appuser - -# Expose port -EXPOSE 8000 - -# Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ - CMD curl -f http://localhost:8000/health || exit 1 - -# Run the application -CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py b/src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py deleted file mode 100644 index c0ca9bb3c..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/mocks/app/main.py +++ /dev/null @@ -1,299 +0,0 @@ -import asyncio -import json -import logging -import os -import random -import re -from datetime import datetime -from typing import Dict - -from fastapi import FastAPI, HTTPException -from fastapi.responses import StreamingResponse -from pydantic import BaseModel, Field - -# Configure logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - -# Environment variables -SERVICE_NAME = os.getenv('SERVICE_NAME', 'nce') -BASE_BANDWIDTH = float(os.getenv('BASE_BANDWIDTH', '75.0')) -BASE_DELAY = float(os.getenv('BASE_DELAY', '2.0')) -PORT = int(os.getenv('PORT', '8000')) - - -match_network = re.compile(r'\/network=([^\/]*)') - -# Global state -subscriptions: Dict[str, Dict] = {} -active_streams: Dict[str, bool] = {} -metrics_multipliers = {'bandwidth': 1.0, 'delay': 1.0} - -app = FastAPI( - title='F5G Telemetry API with SSE + YANG Push', - version='0.0.1', - description=f'Mock telemetry server for {SERVICE_NAME}', -) - - -# Pydantic models -class EstablishSubscriptionInput(BaseModel): - datastore: str - ietf_yang_push_datastore_xpath_filter: str = Field( - alias='ietf-yang-push:datastore-xpath-filter' - ) - ietf_yang_push_periodic: Dict[str, str] = Field(alias='ietf-yang-push:periodic') - - -class EstablishSubscriptionRequest(BaseModel): - ietf_subscribed_notifications_input: EstablishSubscriptionInput = Field( - alias='ietf-subscribed-notifications:input' - ) - - -class EstablishSubscriptionResponse(BaseModel): - identifier: str - uri: str - - -class DeleteSubscriptionInner(BaseModel): - identifier: str - - -class DeleteSubscriptionRequest(BaseModel): - delete_subscription: DeleteSubscriptionInner = Field(alias='delete-subscription') - - -class MetricRequest(BaseModel): - factor: float = Field(default=0.2, description='Factor to degrade/enhance metrics by') - - -def generate_telemetry_data(update_id: str, service_name: str) -> Dict: - """Generate realistic telemetry data with noise""" - base_bandwidth = BASE_BANDWIDTH * metrics_multipliers['bandwidth'] - base_delay = BASE_DELAY * metrics_multipliers['delay'] - - # Add realistic noise - bandwidth_noise = random.uniform(-5.0, 5.0) - delay_noise = random.uniform(-0.5, 0.5) 
- - bandwidth = max(0.0, base_bandwidth + bandwidth_noise) - delay = max(0.0, base_delay + delay_noise) - - return { - 'notification': { - 'eventTime': datetime.utcnow().isoformat() + 'Z', - 'push-update': { - 'id': update_id, - 'datastore-contents': { - 'ietf-network-topology:simap-telemetry': { - 'bandwidth-utilization': str(round(bandwidth, 2)), - 'latency': str(round(delay, 2)), - 'related-service-ids': [service_name], - } - }, - }, - } - } - - -@app.get('/health') -async def health_check(): - """Health check endpoint""" - return {'status': 'healthy', 'service': SERVICE_NAME} - - -@app.post('/restconf/operations/subscriptions:establish-subscription') -async def establish_subscription( - request: EstablishSubscriptionRequest, -) -> EstablishSubscriptionResponse: - """Establish a telemetry subscription""" - subscription_id = str(random.randint(10, 99)) - - m = match_network.search( - request.ietf_subscribed_notifications_input.ietf_yang_push_datastore_xpath_filter - ) - service_name = m.groups()[0] - # Store subscription details - subscriptions[subscription_id] = { - 'id': subscription_id, - 'datastore': request.ietf_subscribed_notifications_input.datastore, - 'xpath_filter': request.ietf_subscribed_notifications_input.ietf_yang_push_datastore_xpath_filter, - 'service': service_name, - 'period': request.ietf_subscribed_notifications_input.ietf_yang_push_periodic.get( - 'ietf-yang-push:period', '3' - ), - 'created_at': datetime.utcnow().isoformat(), - 'active': True, - } - - logger.info(f'Created subscription {subscription_id} for {SERVICE_NAME}') - - return EstablishSubscriptionResponse( - identifier=subscription_id, uri=f'/restconf/data/subscriptions/{subscription_id}' - ) - - -@app.get('/restconf/data/subscriptions/{subscription_id}') -async def start_telemetry_stream(subscription_id: str): - """Start SSE telemetry stream for a subscription""" - if subscription_id not in subscriptions: - raise HTTPException(status_code=404, detail='Subscription not found') - - subscription = subscriptions[subscription_id] - if not subscription['active']: - raise HTTPException(status_code=400, detail='Subscription is not active') - - active_streams[subscription_id] = True - - async def event_generator(): - """Generate SSE events""" - update_counter = 1 - period = float(subscription['period']) - - try: - while active_streams.get(subscription_id, False): - # Generate telemetry data - telemetry_data = generate_telemetry_data( - str(update_counter), subscription['service'] - ) - - # Format as SSE - sse_data = f'event: push-update\nid: {update_counter}\ndata: {json.dumps(telemetry_data)}\n\n' - - yield sse_data - - update_counter += 1 - await asyncio.sleep(period) - - # Send termination event - termination_event = ( - f'event: subscription-terminated\nid: {update_counter}\ndata: {{}}\n\n' - ) - yield termination_event - - except Exception as e: - logger.error(f'Error in event generator: {e}') - finally: - # Clean up - active_streams[subscription_id] = False - if subscription_id in subscriptions: - subscriptions[subscription_id]['active'] = False - - return StreamingResponse( - event_generator(), - media_type='text/event-stream', - headers={ - 'Cache-Control': 'no-cache', - 'Connection': 'keep-alive', - 'Access-Control-Allow-Origin': '*', - 'Access-Control-Allow-Headers': 'Cache-Control', - }, - ) - - -@app.post('/restconf/operations/subscriptions:delete-subscription') -async def delete_subscription(request: DeleteSubscriptionRequest): - """Delete a subscription""" - subscription_id = 
request.delete_subscription.identifier - - if subscription_id not in subscriptions: - raise HTTPException(status_code=404, detail='Subscription not found') - - # Stop active stream - active_streams[subscription_id] = False - - # Remove subscription - del subscriptions[subscription_id] - - logger.info(f'Deleted subscription {subscription_id} for {SERVICE_NAME}') - - return {'status': 'deleted'} - - -@app.post('/degrade/delay') -async def degrade_delay(request: MetricRequest): - """Degrade delay metrics""" - metrics_multipliers['delay'] *= 1.0 + request.factor - logger.info(f'Degraded delay by factor {request.factor} for {SERVICE_NAME}') - return {'status': 'degraded', 'new_multiplier': metrics_multipliers['delay']} - - -@app.post('/enhance/delay') -async def enhance_delay(request: MetricRequest): - """Enhance delay metrics""" - metrics_multipliers['delay'] *= 1.0 - request.factor - metrics_multipliers['delay'] = max(0.1, metrics_multipliers['delay']) # Prevent negative values - logger.info(f'Enhanced delay by factor {request.factor} for {SERVICE_NAME}') - return {'status': 'enhanced', 'new_multiplier': metrics_multipliers['delay']} - - -@app.post('/degrade/bandwidth') -async def degrade_bandwidth(request: MetricRequest): - """Degrade bandwidth metrics""" - metrics_multipliers['bandwidth'] *= 1.0 - request.factor - metrics_multipliers['bandwidth'] = max( - 0.1, metrics_multipliers['bandwidth'] - ) # Prevent negative values - logger.info(f'Degraded bandwidth by factor {request.factor} for {SERVICE_NAME}') - return {'status': 'degraded', 'new_multiplier': metrics_multipliers['bandwidth']} - - -@app.post('/enhance/bandwidth') -async def enhance_bandwidth(request: MetricRequest): - """Enhance bandwidth metrics""" - metrics_multipliers['bandwidth'] *= 1.0 + request.factor - logger.info(f'Enhanced bandwidth by factor {request.factor} for {SERVICE_NAME}') - return {'status': 'enhanced', 'new_multiplier': metrics_multipliers['bandwidth']} - - -@app.get('/subscriptions') -async def list_subscriptions(): - """List all active subscriptions""" - return {'subscriptions': subscriptions} - - -@app.get('/metrics/status') -async def get_metrics_status(): - """Get current metrics multipliers""" - return { - 'service': SERVICE_NAME, - 'base_bandwidth': BASE_BANDWIDTH, - 'base_delay': BASE_DELAY, - 'current_multipliers': metrics_multipliers, - 'effective_bandwidth': BASE_BANDWIDTH * metrics_multipliers['bandwidth'], - 'effective_delay': BASE_DELAY * metrics_multipliers['delay'], - } - - -if __name__ == '__main__': - import uvicorn - - uvicorn.run(app, host='0.0.0.0', port=PORT) - - - -''' -subscription: -curl -X POST http://localhost:8001/restconf/operations/subscriptions:establish-subscription -H "Content-Type: application/json" -d '{ - "ietf-subscribed-notifications:input": { - "datastore": "operational", - "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=trans-simap-1/ietf-network-topology:link=link-1/simap-telemetry", - "ietf-yang-push:periodic": { - "ietf-yang-push:period": "3" - } - } - }' - - -start telemetry: -curl -X GET http://localhost:8001/restconf/data/subscriptions/34 - - -stop telemetry: -curl -X POST http://localhost:8001/restconf/operations/subscriptions:delete-subscription -H "Content-Type: application/json" -d '{ - "delete-subscription": { - "identifier": "34" - } - }' -''' diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml b/src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml deleted file mode 100644 index
c5cd0b8b2..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/mocks/docker-compose.yml +++ /dev/null @@ -1,58 +0,0 @@ -version: '3.8' - -services: - nce: - build: . - ports: - - "8001:8000" - environment: - - SERVICE_NAME=nce - - BASE_BANDWIDTH=75.0 - - BASE_DELAY=2.0 - - PORT=8000 - volumes: - - ./app:/app - restart: unless-stopped - - aggnet-controller: - build: . - ports: - - "8002:8000" - environment: - - SERVICE_NAME=aggnet-controller - - BASE_BANDWIDTH=80.0 - - BASE_DELAY=1.5 - - PORT=8000 - volumes: - - ./app:/app - restart: unless-stopped - - ip-controller: - build: . - ports: - - "8003:8000" - environment: - - SERVICE_NAME=ip-controller - - BASE_BANDWIDTH=80.0 - - BASE_DELAY=1.5 - - PORT=8000 - volumes: - - ./app:/app - restart: unless-stopped - - optical-controller: - build: . - ports: - - "8004:8000" - environment: - - SERVICE_NAME=optical-controller - - BASE_BANDWIDTH=90.0 - - BASE_DELAY=1.0 - - PORT=8000 - volumes: - - ./app:/app - restart: unless-stopped - -networks: - default: - driver: bridge diff --git a/src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt b/src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt deleted file mode 100644 index 1cc77241c..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/mocks/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -fastapi==0.104.1 -uvicorn[standard]==0.24.0 -pydantic==2.5.0 -python-multipart==0.0.6 -aiofiles==23.2.1 diff --git a/src/tests/ecoc25-camara-e2e-telemetry/requirements.in b/src/tests/ecoc25-camara-e2e-telemetry/requirements.in deleted file mode 100644 index 1bdaec999..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/requirements.in +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -deepdiff==6.7.* -requests==2.27.* - -coverage==6.3 -grpcio==1.47.* -grpcio-health-checking==1.47.* -grpcio-reflection==1.47.* -grpcio-tools==1.47.* -grpclib==0.4.4 -prettytable==3.5.0 -prometheus-client==0.13.0 -protobuf==3.20.* -pytest==6.2.5 -pytest-benchmark==3.4.1 -python-dateutil==2.8.2 -pytest-depends==1.0.1 diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py deleted file mode 100644 index 5997e58c8..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/tests/Fixtures.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from monitoring.client.MonitoringClient import MonitoringClient -from service.client.ServiceClient import ServiceClient - -@pytest.fixture(scope='session') -def context_client(): - _client = ContextClient() - yield _client - _client.close() - -@pytest.fixture(scope='session') -def device_client(): - _client = DeviceClient() - yield _client - _client.close() - -@pytest.fixture(scope='session') -def monitoring_client(): - _client = MonitoringClient() - yield _client - _client.close() - -@pytest.fixture(scope='session') -def service_client(): - _client = ServiceClient() - yield _client - _client.close() diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py deleted file mode 100644 index 9ca1d7d21..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/tests/Tools.py +++ /dev/null @@ -1,109 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import enum, logging, requests -from typing import Any, Dict, List, Optional, Set, Union -from common.Constants import ServiceNameEnum -from common.Settings import get_service_host, get_service_port_http - -NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) -NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) -NBI_USERNAME = 'admin' -NBI_PASSWORD = 'admin' -NBI_BASE_URL = '' - -class RestRequestMethod(enum.Enum): - GET = 'get' - POST = 'post' - PUT = 'put' - PATCH = 'patch' - DELETE = 'delete' - -EXPECTED_STATUS_CODES : Set[int] = { - requests.codes['OK' ], - requests.codes['CREATED' ], - requests.codes['ACCEPTED' ], - requests.codes['NO_CONTENT'], -} - -def do_rest_request( - method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( - NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url - ) - - if logger is not None: - msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) - if body is not None: msg += ' body={:s}'.format(str(body)) - logger.warning(msg) - reply = requests.request(method.value, request_url, headers={'Content-Type': 'application/json'}, timeout=timeout, json=body, allow_redirects=allow_redirects) - if logger is not None: - logger.warning('Reply: {:s}'.format(str(reply.text))) - assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) - - if reply.content and len(reply.content) > 0: return reply.json() - return None - -def do_rest_get_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : 
Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_post_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_put_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_patch_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) - -def do_rest_delete_request( - url : str, body : Optional[Any] = None, timeout : int = 10, - allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, - logger : Optional[logging.Logger] = None -) -> Optional[Union[Dict, List]]: - return do_rest_request( - RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, - expected_status_codes=expected_status_codes, logger=logger - ) diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py deleted file mode 100644 index 3ccc21c7d..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/tests/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py deleted file mode 100644 index cb991edbf..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/tests/test_e2e_ietf_slice_operations.py +++ /dev/null @@ -1,478 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json, logging, os -import requests -from deepdiff import DeepDiff - - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -HEADERS = {"Content-Type": "application/json"} - -POST_NETWORK_SLICE1 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_network_slice1.json", -) -POST_NETWORK_SLICE2 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_network_slice2.json", -) -POST_CONNECTION_GROUP_TO_NETWORK_SLICE1 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_connection_group_to_network_slice1.json", -) -POST_CONNECTION_GROUP_TO_NETWORK_SLICE2 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_connection_group_to_network_slice2.json", -) -POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE1 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_match_criteria_to_sdp1_in_slice1.json", -) -POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE2 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_match_criteria_to_sdp1_in_slice2.json", -) -POST_SDP_TO_NETWORK_SLICE1 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_sdp_to_network_slice1.json", -) -POST_SDP_TO_NETWORK_SLICE2 = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "post_sdp_to_network_slice2.json", -) -TARGET_NCE_APP_FLOWS = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "target-nce-app-flows.json", -) -TARGET_NCE_APPS = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "target-nce-apps.json", -) -TARGET_FULL_IETF_SLICE = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "slice", - "target-full-ietf-slice.json", -) -TARGET_FULL_IETF_SLICE = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "target-full-ietf-slice.json", -) -TARGET_IETF_SLICE_POSTED_SLICES = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "target-ietf-slice-posted-slices.json", -) -TARGET_IETF_SLICE_PUT_CONNECTION_GROUPS = os.path.join( - os.path.dirname(os.path.abspath(__file__)), - "..", - "data", - "target-ietf-slice-put-connection-groups.json", -) - -NBI_ADDRESS = "localhost" -NBI_PORT = "80" -NBI_USERNAME = "admin" -NBI_PASSWORD = "admin" - -NCE_ADDRESS = "localhost" -NCE_PORT = 9090 - -AGG_TFS_ADDRESS = "localhost" -AGG_TFS_PORT = 9091 - -BASE_IETF_SLICE_URL = f"http://{NBI_ADDRESS}:{NBI_PORT}/restconf/data/ietf-network-slice-service:network-slice-services" -NCE_APP_DATA_URL = f"http://{NCE_ADDRESS}:{NCE_PORT}/restconf/v1/data/app-flows/apps" -NCE_APP_FLOW_DATA_URL = f"http://{NCE_ADDRESS}:{NCE_PORT}/restconf/v1/data/app-flows" -AGG_TFS_IETF_SLICE_URL = f"http://{AGG_TFS_ADDRESS}:{AGG_TFS_PORT}/restconf/data/ietf-network-slice-service:network-slice-services" - - -# pylint: disable=redefined-outer-name, unused-argument -def 
test_ietf_slice_creation_removal(): - # Issue service creation request - with open(POST_NETWORK_SLICE1, "r", encoding="UTF-8") as f: - post_network_slice1 = json.load(f) - with open(POST_NETWORK_SLICE2, "r", encoding="UTF-8") as f: - post_network_slice2 = json.load(f) - with open(POST_CONNECTION_GROUP_TO_NETWORK_SLICE1, "r", encoding="UTF-8") as f: - post_connection_group_to_network_slice1 = json.load(f) - with open(POST_CONNECTION_GROUP_TO_NETWORK_SLICE2, "r", encoding="UTF-8") as f: - post_connection_group_to_network_slice2 = json.load(f) - with open(POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE1, "r", encoding="UTF-8") as f: - post_match_criteria_to_sdp1_in_slice1 = json.load(f) - with open(POST_MATCH_CRITERIA_TO_SDP1_IN_SLICE2, "r", encoding="UTF-8") as f: - post_match_criteria_to_sdp1_in_slice2 = json.load(f) - with open(POST_SDP_TO_NETWORK_SLICE1, "r", encoding="UTF-8") as f: - post_sdp_to_network_slice1 = json.load(f) - with open(POST_SDP_TO_NETWORK_SLICE2, "r", encoding="UTF-8") as f: - post_sdp_to_network_slice2 = json.load(f) - with open(TARGET_NCE_APPS, "r", encoding="UTF-8") as f: - target_nce_apps = json.load(f) - with open(TARGET_NCE_APP_FLOWS, "r", encoding="UTF-8") as f: - target_nce_app_flows = json.load(f) - with open(TARGET_FULL_IETF_SLICE, "r", encoding="UTF-8") as f: - target_full_ietf_slice = json.load(f) - with open(TARGET_IETF_SLICE_POSTED_SLICES, "r", encoding="UTF-8") as f: - target_ietf_slice_posted_slices = json.load(f) - with open(TARGET_IETF_SLICE_PUT_CONNECTION_GROUPS, "r", encoding="UTF-8") as f: - target_ietf_slice_put_connection_groups = json.load(f) - - # op 1 - URL = BASE_IETF_SLICE_URL - requests.post(URL, headers=HEADERS, json=post_network_slice1) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - app_name = "App_Flow_2_1_slice1" - apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) - app_flows_diff = DeepDiff( - app_flows_response[app_name], - target_nce_app_flows[app_name], - exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", - ) - assert not apps_diff - assert not app_flows_diff - assert len(apps_response) == 1 and len(app_flows_response) == 1 - - assert len(ietf_slice_connection_groups) == 0 - assert len(ietf_slice_services) == 1 - slice_diff = DeepDiff( - ietf_slice_services["slice1"], target_ietf_slice_posted_slices[0] - ) - assert not slice_diff - - # op 2 - URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps" - requests.post(URL, headers=HEADERS, json=post_sdp_to_network_slice1) - URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/connection-groups" - requests.post(URL, headers=HEADERS, json=post_connection_group_to_network_slice1) - URL = ( - BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=1/service-match-criteria" - ) - requests.post(URL, headers=HEADERS, json=post_match_criteria_to_sdp1_in_slice1) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = 
requests.get(URL).json() - - app_name = "App_Flow_3_1_slice1" - apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) - app_flows_diff = DeepDiff( - app_flows_response[app_name], - target_nce_app_flows[app_name], - exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", - ) - assert not apps_diff - assert not app_flows_diff - assert len(apps_response) == 2 and len(app_flows_response) == 2 - - assert len(ietf_slice_connection_groups) == 1 - assert len(ietf_slice_services) == 1 - connection_group_diff = DeepDiff( - ietf_slice_connection_groups[0], target_ietf_slice_put_connection_groups[0] - ) - assert not connection_group_diff - - # op 3 - URL = BASE_IETF_SLICE_URL - requests.post(URL, headers=HEADERS, json=post_network_slice2) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - app_name = "App_Flow_2_1_slice2" - apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) - app_flows_diff = DeepDiff( - app_flows_response[app_name], - target_nce_app_flows[app_name], - exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", - ) - assert not apps_diff - assert not app_flows_diff - assert len(apps_response) == 3 and len(app_flows_response) == 3 - - assert len(ietf_slice_connection_groups) == 1 - assert len(ietf_slice_services) == 2 - slice_diff = DeepDiff( - ietf_slice_services["slice2"], target_ietf_slice_posted_slices[1] - ) - assert not slice_diff - - # op 4 - URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps" - requests.post(URL, headers=HEADERS, json=post_sdp_to_network_slice2) - URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/connection-groups" - requests.post(URL, headers=HEADERS, json=post_connection_group_to_network_slice2) - URL = ( - BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=1/service-match-criteria" - ) - requests.post(URL, headers=HEADERS, json=post_match_criteria_to_sdp1_in_slice2) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - app_name = "App_Flow_3_1_slice2" - apps_diff = DeepDiff(apps_response[app_name], target_nce_apps[app_name]) - app_flows_diff = DeepDiff( - app_flows_response[app_name], - target_nce_app_flows[app_name], - exclude_regex_paths=r"root\['app-flow'\]\[\d+\]\['user-id'\]", - ) - assert not apps_diff - assert not app_flows_diff - assert len(apps_response) == 4 and len(app_flows_response) == 4 - - assert len(ietf_slice_connection_groups) == 2 - assert len(ietf_slice_services) == 2 - connection_group_diff = DeepDiff( - ietf_slice_connection_groups[1], target_ietf_slice_put_connection_groups[1] - ) - assert not connection_group_diff - - # op 5 - ietf_slices_full_retrieved = requests.get(BASE_IETF_SLICE_URL).json() - ietf_slice_data = DeepDiff(ietf_slices_full_retrieved, target_full_ietf_slice) - assert not ietf_slice_data - - # op 6 - URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=2" - requests.delete(URL) 
- URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice1/sdps/sdp=1/service-match-criteria/match-criterion=1" - ) - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice1/connection-groups/connection-group=line1" - ) - requests.delete(URL) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - app_name = "App_Flow_2_1_slice1" - assert app_name not in apps_response - assert app_name not in app_flows_response - assert len(apps_response) == 3 and len(app_flows_response) == 3 - - assert len(ietf_slice_connection_groups) == 3 - assert len(ietf_slice_services) == 2 - connection_group_diff = DeepDiff( - ietf_slice_connection_groups[2], target_ietf_slice_put_connection_groups[2] - ) - assert not connection_group_diff - - # op 7 - URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=3" - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice1/sdps/sdp=1/service-match-criteria/match-criterion=2" - ) - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice1/connection-groups/connection-group=line2" - ) - requests.delete(URL) - URL = BASE_IETF_SLICE_URL + "/slice-service=slice1/sdps/sdp=1" - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - requests.delete(URL) - URL = BASE_IETF_SLICE_URL + "/slice-service=slice1" - requests.delete(URL) - - app_name = "App_Flow_3_1_slice1" - assert app_name not in apps_response - assert app_name not in app_flows_response - assert len(apps_response) == 2 and len(app_flows_response) == 2 - - assert len(ietf_slice_connection_groups) == 3 - assert len(ietf_slice_services) == 1 - assert "slice1" not in ietf_slice_services - - # op 8 - URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=2" - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice2/sdps/sdp=1/service-match-criteria/match-criterion=1" - ) - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice2/connection-groups/connection-group=line1" - ) - requests.delete(URL) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - app_name = "App_Flow_2_1_slice2" - assert app_name not in apps_response - assert app_name not in app_flows_response - assert len(apps_response) == 1 and len(app_flows_response) == 1 - - assert len(ietf_slice_connection_groups) == 4 - assert len(ietf_slice_services) == 1 - connection_group_diff = DeepDiff( - ietf_slice_connection_groups[3], target_ietf_slice_put_connection_groups[3] - ) - assert not connection_group_diff - - # op 9 - URL = 
BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=3" - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice2/sdps/sdp=1/service-match-criteria/match-criterion=2" - ) - requests.delete(URL) - URL = ( - BASE_IETF_SLICE_URL - + "/slice-service=slice2/connection-groups/connection-group=line2" - ) - requests.delete(URL) - - URL = NCE_APP_DATA_URL - apps_response = requests.get(URL).json() - URL = NCE_APP_FLOW_DATA_URL - app_flows_response = requests.get(URL).json() - URL = AGG_TFS_IETF_SLICE_URL - ietf_slice_services = requests.get(URL).json() - URL = ( - AGG_TFS_IETF_SLICE_URL - + "/slice-service=dummy/connection-groups/connection-group=dummy" - ) - ietf_slice_connection_groups = requests.get(URL).json() - - URL = BASE_IETF_SLICE_URL + "/slice-service=slice2/sdps/sdp=1" - requests.delete(URL) - URL = BASE_IETF_SLICE_URL + "/slice-service=slice2" - requests.delete(URL) - - app_name = "App_Flow_3_1_slice2" - assert app_name not in apps_response - assert app_name not in app_flows_response - assert len(apps_response) == 0 and len(app_flows_response) == 0 - - assert len(ietf_slice_connection_groups) == 4 - assert len(ietf_slice_services) == 0 - - # op 10 - ietf_slices_full_retrieved = requests.get(BASE_IETF_SLICE_URL).json() - empty_ietf_slices = {"network-slice-services": {"slice-service": []}} - ietf_slice_data = DeepDiff(ietf_slices_full_retrieved, empty_ietf_slices) - assert not ietf_slice_data diff --git a/src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py b/src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py deleted file mode 100644 index 273d5d1f4..000000000 --- a/src/tests/ecoc25-camara-e2e-telemetry/tests/test_onboarding.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import logging, os, time -from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty -from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario -from common.tools.object_factory.Context import json_context_id -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from .Fixtures import context_client, device_client # pylint: disable=unused-import - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'camara-e2e-topology-modified.json') -ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) - -def test_scenario_onboarding( - context_client : ContextClient, # pylint: disable=redefined-outer-name - device_client : DeviceClient, # pylint: disable=redefined-outer-name -) -> None: - validate_empty_scenario(context_client) - - descriptor_loader = DescriptorLoader( - descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) - results = descriptor_loader.process() - check_descriptor_load_results(results, descriptor_loader) - # descriptor_loader.validate() - - # Verify the scenario has no services/slices - response = context_client.GetContext(ADMIN_CONTEXT_ID) - assert len(response.service_ids) == 0 - assert len(response.slice_ids) == 0 - -def test_scenario_devices_enabled( - context_client : ContextClient, # pylint: disable=redefined-outer-name -) -> None: - """ - This test validates that the devices are enabled. - """ - DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - - num_devices = -1 - num_devices_enabled, num_retry = 0, 0 - while (num_devices != num_devices_enabled) and (num_retry < 10): - time.sleep(1.0) - response = context_client.ListDevices(Empty()) - num_devices = len(response.devices) - num_devices_enabled = 0 - for device in response.devices: - if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue - num_devices_enabled += 1 - LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) - num_retry += 1 - assert num_devices_enabled == num_devices -- GitLab From c73d4e9c6c18fdbce2031600bfc8d379e5def146 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 11:35:28 +0000 Subject: [PATCH 337/367] Code cleanup --- f5ga | 1 - my_deploy.sh | 2 +- nfvsdn22 | 1 - oeccpsc22 | 1 - .../config-port-forward-vpn.sh | 24 ------------------- .../nce_fan_client/Requests.py | 15 ++++++++++++ .../nce_fan_ctrl/Callbacks.py | 6 ++--- .../mock_nce_t_ctrl/nce_t_client/Requests.py | 15 ++++++++++++ .../mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py | 4 ++-- 9 files changed, 36 insertions(+), 33 deletions(-) delete mode 120000 f5ga delete mode 120000 nfvsdn22 delete mode 120000 oeccpsc22 delete mode 100755 src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh diff --git a/f5ga b/f5ga deleted file mode 120000 index f38bc47e9..000000000 --- a/f5ga +++ /dev/null @@ -1 +0,0 @@ -src/tests/ecoc25-f5ga-telemetry/ \ No newline at end of file diff --git a/my_deploy.sh b/my_deploy.sh index 71fbdf9bb..97b65d116 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -87,7 +87,7 @@ export TFS_COMPONENTS="context device pathcomp service slice nbi webui" #fi # Uncomment to activate SIMAP Connector -export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" +#export 
TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" # Uncomment to activate Load Generator #export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" diff --git a/nfvsdn22 b/nfvsdn22 deleted file mode 120000 index ac93a84be..000000000 --- a/nfvsdn22 +++ /dev/null @@ -1 +0,0 @@ -src/tests/nfvsdn22/ \ No newline at end of file diff --git a/oeccpsc22 b/oeccpsc22 deleted file mode 120000 index 4f55befad..000000000 --- a/oeccpsc22 +++ /dev/null @@ -1 +0,0 @@ -src/tests/oeccpsc22/ \ No newline at end of file diff --git a/src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh b/src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh deleted file mode 100755 index 68becba20..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/config-port-forward-vpn.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# Enable routing/NAT on TFS-E2E -echo "net.ipv4.ip_forward=1" | sudo tee -a /etc/sysctl.d/20-ipv4-forward.conf -sudo sysctl -p - -# DNAT from VPN to VM-B -sudo iptables -t nat -A PREROUTING -i tun0 -p tcp --dport 8881 -j DNAT --to-destination 10.254.0.9:8080 -sudo iptables -t nat -A PREROUTING -i tun0 -p tcp --dport 8882 -j DNAT --to-destination 10.254.0.9:8083 - -# MASQUERADE replies from VM-B back to VPN (generic, not by port) -sudo iptables -t nat -A POSTROUTING -o enp0s3 -s 10.0.58.0/24 -j MASQUERADE -sudo iptables -t nat -A POSTROUTING -o enp0s3 -s 10.1.7.0/24 -j MASQUERADE -sudo iptables -t nat -A POSTROUTING -o enp0s3 -s 192.168.0.0/16 -j MASQUERADE - - -# allow new+established from VPN to VM-B’s 8080/8083 -sudo iptables -A FORWARD -i tun0 -o enp0s3 -p tcp -d 10.254.0.9 --dport 8080 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT -sudo iptables -A FORWARD -i tun0 -o enp0s3 -p tcp -d 10.254.0.9 --dport 8083 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT - -# allow return traffic back from LAN to VPN -sudo iptables -A FORWARD -i enp0s3 -o tun0 -m state --state ESTABLISHED,RELATED -j ACCEPT - -echo "Done!" diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py index 7d6e8528d..042c31c8a 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_client/Requests.py @@ -1,3 +1,18 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + QOS_PROFILE_NAME = 'AR_VR_Gaming' URL_QOS_PROFILE_ITEM = '/huawei-nce-app-flow:qos-profiles/qos-profile={:s}'.format(QOS_PROFILE_NAME) REQUEST_QOS_PROFILE = {"huawei-nce-app-flow:qos-profiles": {"qos-profile": [ diff --git a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py index 5de08b9f7..1c6996581 100644 --- a/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py +++ b/src/tests/tools/mock_nce_fan_ctrl/nce_fan_ctrl/Callbacks.py @@ -28,7 +28,7 @@ class CallbackQosProfile(_Callback): pattern += r'/qos-profile=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: @@ -44,7 +44,7 @@ class CallbackApplication(_Callback): pattern += r'/application=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: @@ -60,7 +60,7 @@ class CallbackAppFlow(_Callback): pattern += r'/app-flow=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py index c9d51f14c..fb3e06ecb 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_client/Requests.py @@ -1,3 +1,18 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + OSU_TUNNEL_NAME = 'osu_tunnel_1' URL_OSU_TUNNEL_ITEM = '/ietf-te:te/tunnels/tunnel={:s}'.format(OSU_TUNNEL_NAME) REQUEST_OSU_TUNNEL = {"ietf-te:te": {"tunnels": {"tunnel": [ diff --git a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py index dc728c00c..ea2e7f748 100644 --- a/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py +++ b/src/tests/tools/mock_nce_t_ctrl/nce_t_ctrl/Callbacks.py @@ -28,7 +28,7 @@ class CallbackOsuTunnel(_Callback): pattern += r'/tunnel=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: @@ -44,7 +44,7 @@ class CallbackEthTService(_Callback): pattern += r'/etht-svc-instances=(?P[^/]+)' super().__init__(pattern) - def execute( + def execute_data( self, match : re.Match, path : str, old_data : Optional[Dict], new_data : Optional[Dict] ) -> bool: -- GitLab From 287cd76e901a117d2d28d2197c518fcf0ad433aa Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 11:38:28 +0000 Subject: [PATCH 338/367] CI/CD pipeline: - Re-enabled all tests --- .gitlab-ci.yml | 80 +++++++++++++++++++++++++------------------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ef2389337..2856f9fed 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,43 +21,43 @@ stages: # include the individual .gitlab-ci.yml of each micro-service and tests include: -# #- local: '/manifests/.gitlab-ci.yml' -# - local: '/src/monitoring/.gitlab-ci.yml' -# - local: '/src/nbi/.gitlab-ci.yml' -# - local: '/src/context/.gitlab-ci.yml' -# - local: '/src/device/.gitlab-ci.yml' -# - local: '/src/service/.gitlab-ci.yml' -# - local: '/src/qkd_app/.gitlab-ci.yml' -# - local: '/src/dbscanserving/.gitlab-ci.yml' -# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' -# - local: '/src/opticalattackdetector/.gitlab-ci.yml' -# - local: '/src/opticalattackmanager/.gitlab-ci.yml' -# - local: '/src/opticalcontroller/.gitlab-ci.yml' -# - local: '/src/ztp/.gitlab-ci.yml' -# - local: '/src/policy/.gitlab-ci.yml' -# - local: '/src/automation/.gitlab-ci.yml' -# - local: '/src/forecaster/.gitlab-ci.yml' -# #- local: '/src/webui/.gitlab-ci.yml' -# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' -# - local: '/src/slice/.gitlab-ci.yml' -# #- local: '/src/interdomain/.gitlab-ci.yml' -# - local: '/src/pathcomp/.gitlab-ci.yml' -# #- local: '/src/dlt/.gitlab-ci.yml' -# - local: '/src/load_generator/.gitlab-ci.yml' -# - local: '/src/bgpls_speaker/.gitlab-ci.yml' -# - local: '/src/kpi_manager/.gitlab-ci.yml' -# - local: '/src/kpi_value_api/.gitlab-ci.yml' -# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' -# #- local: '/src/telemetry/.gitlab-ci.yml' -# - local: '/src/analytics/.gitlab-ci.yml' -# - local: '/src/qos_profile/.gitlab-ci.yml' -# - local: '/src/vnt_manager/.gitlab-ci.yml' -# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' -# - local: '/src/ztp_server/.gitlab-ci.yml' -# - local: '/src/osm_client/.gitlab-ci.yml' -# - local: '/src/simap_connector/.gitlab-ci.yml' -# -# # This should be last one: end-to-end integration tests -# - local: '/src/tests/.gitlab-ci.yml' + #- local: '/manifests/.gitlab-ci.yml' + - local: '/src/monitoring/.gitlab-ci.yml' + - local: '/src/nbi/.gitlab-ci.yml' + - local: '/src/context/.gitlab-ci.yml' + - local: 
'/src/device/.gitlab-ci.yml' + - local: '/src/service/.gitlab-ci.yml' + - local: '/src/qkd_app/.gitlab-ci.yml' + - local: '/src/dbscanserving/.gitlab-ci.yml' + - local: '/src/opticalattackmitigator/.gitlab-ci.yml' + - local: '/src/opticalattackdetector/.gitlab-ci.yml' + - local: '/src/opticalattackmanager/.gitlab-ci.yml' + - local: '/src/opticalcontroller/.gitlab-ci.yml' + - local: '/src/ztp/.gitlab-ci.yml' + - local: '/src/policy/.gitlab-ci.yml' + - local: '/src/automation/.gitlab-ci.yml' + - local: '/src/forecaster/.gitlab-ci.yml' + #- local: '/src/webui/.gitlab-ci.yml' + #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' + - local: '/src/slice/.gitlab-ci.yml' + #- local: '/src/interdomain/.gitlab-ci.yml' + - local: '/src/pathcomp/.gitlab-ci.yml' + #- local: '/src/dlt/.gitlab-ci.yml' + - local: '/src/load_generator/.gitlab-ci.yml' + - local: '/src/bgpls_speaker/.gitlab-ci.yml' + - local: '/src/kpi_manager/.gitlab-ci.yml' + - local: '/src/kpi_value_api/.gitlab-ci.yml' + #- local: '/src/kpi_value_writer/.gitlab-ci.yml' + #- local: '/src/telemetry/.gitlab-ci.yml' + - local: '/src/analytics/.gitlab-ci.yml' + - local: '/src/qos_profile/.gitlab-ci.yml' + - local: '/src/vnt_manager/.gitlab-ci.yml' + - local: '/src/e2e_orchestrator/.gitlab-ci.yml' + - local: '/src/ztp_server/.gitlab-ci.yml' + - local: '/src/osm_client/.gitlab-ci.yml' + - local: '/src/simap_connector/.gitlab-ci.yml' + + # This should be last one: end-to-end integration tests + - local: '/src/tests/.gitlab-ci.yml' -- GitLab From b277303271c63fd356cc2fb6378cabc5807bf963 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 11:54:00 +0000 Subject: [PATCH 339/367] Device component - IETF ACTL Driver: - Fix unitary test for IETF ACTN --- src/device/tests/test_unitary_ietf_actn.py | 30 +++++++++++----------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/src/device/tests/test_unitary_ietf_actn.py b/src/device/tests/test_unitary_ietf_actn.py index f9e6748fe..784bb3a58 100644 --- a/src/device/tests/test_unitary_ietf_actn.py +++ b/src/device/tests/test_unitary_ietf_actn.py @@ -35,26 +35,26 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment ) -DEVICE_UUID = 'DEVICE-IETF-ACTN' -DEVICE_ADDRESS = '127.0.0.1' -DEVICE_PORT = 8080 -DEVICE_USERNAME = 'admin' -DEVICE_PASSWORD = 'admin' -DEVICE_SCHEME = 'http' -DEVICE_BASE_URL = '/restconf/v2/data' -DEVICE_TIMEOUT = 120 -DEVICE_VERIFY = False +DEVICE_UUID = 'DEVICE-IETF-ACTN' +DEVICE_ADDRESS = '127.0.0.1' +DEVICE_PORT = 8080 +DEVICE_USERNAME = 'admin' +DEVICE_PASSWORD = 'admin' +DEVICE_SCHEME = 'http' +DEVICE_BASE_URL = '/restconf/v2/data' +DEVICE_TIMEOUT = 120 +DEVICE_VERIFY_CERTS = False DEVICE_ID = json_device_id(DEVICE_UUID) DEVICE = json_device_ietf_actn_disabled(DEVICE_UUID) DEVICE_CONNECT_RULES = json_device_connect_rules(DEVICE_ADDRESS, DEVICE_PORT, { - 'scheme' : DEVICE_SCHEME, - 'username': DEVICE_USERNAME, - 'password': DEVICE_PASSWORD, - 'base_url': DEVICE_BASE_URL, - 'timeout' : DEVICE_TIMEOUT, - 'verify' : DEVICE_VERIFY, + 'scheme' : DEVICE_SCHEME, + 'username' : DEVICE_USERNAME, + 'password' : DEVICE_PASSWORD, + 'base_url' : DEVICE_BASE_URL, + 'timeout' : DEVICE_TIMEOUT, + 'verify_certs': DEVICE_VERIFY_CERTS, }) DATA_FILE_CONFIG_RULES = 'device/tests/data/ietf_actn/config_rules.json' -- GitLab From 
6df419a109858e607b079a1aadec1b320e9fde87 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 12:01:54 +0000 Subject: [PATCH 340/367] Common - Tools - Kafka: - Enhanced topic creation logic to prevent race condition while testing-creating them --- src/common/tools/kafka/Variables.py | 57 ++++++++++++++++------------- 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index 7bb131dd6..b9d52e331 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -16,6 +16,7 @@ import logging, time from enum import Enum #from confluent_kafka.admin import AdminClient, NewTopic from kafka.admin import KafkaAdminClient, NewTopic +from kafka.errors import TopicAlreadyExistsError from common.Settings import get_setting @@ -88,32 +89,36 @@ class KafkaTopic(Enum): #create_topic_future_map = kafka_admin_client.create_topics(missing_topics, request_timeout=5*60) #LOGGER.debug('create_topic_future_map: {:s}'.format(str(create_topic_future_map))) - topics_result = kafka_admin_client.create_topics( - new_topics=missing_topics, timeout_ms=KAFKA_TOPIC_CREATE_REQUEST_TIMEOUT, - validate_only=False - ) - LOGGER.debug('topics_result={:s}'.format(str(topics_result))) - - failed_topic_creations = set() - #for topic, future in create_topic_future_map.items(): - # try: - # LOGGER.info('Waiting for Topic({:s})...'.format(str(topic))) - # future.result() # Blocks until topic is created or raises an exception - # LOGGER.info('Topic({:s}) successfully created.'.format(str(topic))) - # except: # pylint: disable=bare-except - # LOGGER.exception('Failed to create Topic({:s})'.format(str(topic))) - # failed_topic_creations.add(topic) - for topic_name, error_code, error_message in topics_result.topic_errors: - if error_code == 0 and error_message is None: - MSG = 'Topic({:s}) successfully created.' - LOGGER.info(MSG.format(str(topic_name))) - else: - MSG = 'Failed to create Topic({:s}): error_code={:s} error_message={:s}' - LOGGER.error(MSG.format(str(topic_name), str(error_code), str(error_message))) - failed_topic_creations.add(topic_name) - - if len(failed_topic_creations) > 0: return False - LOGGER.debug('All topics created.') + try: + topics_result = kafka_admin_client.create_topics( + new_topics=missing_topics, timeout_ms=KAFKA_TOPIC_CREATE_REQUEST_TIMEOUT, + validate_only=False + ) + LOGGER.debug('topics_result={:s}'.format(str(topics_result))) + + failed_topic_creations = set() + #for topic, future in create_topic_future_map.items(): + # try: + # LOGGER.info('Waiting for Topic({:s})...'.format(str(topic))) + # future.result() # Blocks until topic is created or raises an exception + # LOGGER.info('Topic({:s}) successfully created.'.format(str(topic))) + # except: # pylint: disable=bare-except + # LOGGER.exception('Failed to create Topic({:s})'.format(str(topic))) + # failed_topic_creations.add(topic) + for topic_name, error_code, error_message in topics_result.topic_errors: + if error_code == 0 and error_message is None: + MSG = 'Topic({:s}) successfully created.' 
+ LOGGER.info(MSG.format(str(topic_name))) + else: + MSG = 'Failed to create Topic({:s}): error_code={:s} error_message={:s}' + LOGGER.error(MSG.format(str(topic_name), str(error_code), str(error_message))) + failed_topic_creations.add(topic_name) + + if len(failed_topic_creations) > 0: return False + LOGGER.debug('All topics created.') + + except TopicAlreadyExistsError: + LOGGER.debug('Some topics already exists.') # Wait until topics appear in metadata desired_topics = {topic.value for topic in KafkaTopic} -- GitLab From f746a4f53cbc0adebf226f91170fb2f90d3dbb59 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 12:08:57 +0000 Subject: [PATCH 341/367] PathComp Frontend: - Increased log level to DEBUG in CI/CD tests --- src/pathcomp/.gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pathcomp/.gitlab-ci.yml b/src/pathcomp/.gitlab-ci.yml index cb6e2d98b..ff9da4fc3 100644 --- a/src/pathcomp/.gitlab-ci.yml +++ b/src/pathcomp/.gitlab-ci.yml @@ -137,7 +137,7 @@ unit_test pathcomp-frontend: - docker logs ${IMAGE_NAME}-backend - > docker exec -i ${IMAGE_NAME}-frontend bash -c - "coverage run -m pytest --log-level=INFO --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_unitary.py $IMAGE_NAME/frontend/tests/test_unitary_pathcomp_forecaster.py" + "coverage run -m pytest --log-level=DEBUG --verbose --junitxml=/opt/results/${IMAGE_NAME}-frontend_report.xml $IMAGE_NAME/frontend/tests/test_unitary.py $IMAGE_NAME/frontend/tests/test_unitary_pathcomp_forecaster.py" - docker exec -i ${IMAGE_NAME}-frontend bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' after_script: -- GitLab From 6d2fa750fc60811c9978e953e01e3c3e6bd813c1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 13:10:48 +0000 Subject: [PATCH 342/367] Device component - IETF ACTN Driver: - Fix unitary test for IETF ACTN --- src/device/tests/test_unitary_ietf_actn.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/device/tests/test_unitary_ietf_actn.py b/src/device/tests/test_unitary_ietf_actn.py index 784bb3a58..7ceeca856 100644 --- a/src/device/tests/test_unitary_ietf_actn.py +++ b/src/device/tests/test_unitary_ietf_actn.py @@ -81,6 +81,7 @@ def ietf_actn_sdn_ctrl( add_rsrc = _rest_server.add_resource add_rsrc(Root, '/') + add_rsrc(Root, '/.well-known/host-meta') add_rsrc(OsuTunnels, '/ietf-te:te/tunnels') add_rsrc(OsuTunnel, '/ietf-te:te/tunnels/tunnel=""') add_rsrc(EthServices, '/ietf-eth-tran-service:etht-svc') -- GitLab From 1879c55a47cbeb1562bc73e849d012e95c2f8c9c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 13:47:05 +0000 Subject: [PATCH 343/367] Common - Tools - Type Checkers: - Polished assertions for link attributes --- src/common/tools/kafka/__init__.py | 14 ++++++++++++++ src/common/type_checkers/Assertions.py | 4 +++- 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 src/common/tools/kafka/__init__.py diff --git a/src/common/tools/kafka/__init__.py b/src/common/tools/kafka/__init__.py new file mode 100644 index 000000000..3ccc21c7d --- /dev/null +++ b/src/common/tools/kafka/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/common/type_checkers/Assertions.py b/src/common/type_checkers/Assertions.py index e41b0d0d3..7fb6831a4 100644 --- a/src/common/type_checkers/Assertions.py +++ b/src/common/type_checkers/Assertions.py @@ -511,7 +511,9 @@ def validate_component(component): def validate_link_attributes(link_attributes): assert isinstance(link_attributes, dict) - assert len(link_attributes.keys()) == 2 + assert len(link_attributes.keys()) == 3 + assert 'is_bidirectional' in link_attributes + assert isinstance(link_attributes['is_bidirectional'], bool) assert 'total_capacity_gbps' in link_attributes assert isinstance(link_attributes['total_capacity_gbps'], (int, float)) assert 'used_capacity_gbps' in link_attributes -- GitLab From 9d149dc2c8f031194d83bd67e6b2bfd013e78b90 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 14:18:40 +0000 Subject: [PATCH 344/367] Device component - IETF ACTN Driver: - Fix unitary test for IETF ACTN --- src/device/tests/test_unitary_ietf_actn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/device/tests/test_unitary_ietf_actn.py b/src/device/tests/test_unitary_ietf_actn.py index 7ceeca856..6e29a2c43 100644 --- a/src/device/tests/test_unitary_ietf_actn.py +++ b/src/device/tests/test_unitary_ietf_actn.py @@ -80,8 +80,8 @@ def ietf_actn_sdn_ctrl( return make_response(jsonify({}), 200) add_rsrc = _rest_server.add_resource - add_rsrc(Root, '/') - add_rsrc(Root, '/.well-known/host-meta') + add_rsrc(Root, '/', endpoint='test.root') + add_rsrc(Root, '/.well-known/host-meta', endpoint='test.wellknown.host') add_rsrc(OsuTunnels, '/ietf-te:te/tunnels') add_rsrc(OsuTunnel, '/ietf-te:te/tunnels/tunnel=""') add_rsrc(EthServices, '/ietf-eth-tran-service:etht-svc') -- GitLab From 9801b7bf3aa35f5df5a948ef3a6900266dba6a21 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 14:30:00 +0000 Subject: [PATCH 345/367] PathComp Frontend: - Corrected backend request composition on bidirectionality of links --- .../frontend/service/algorithms/tools/ComposeRequest.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index 087b09754..497e4ce35 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -119,6 +119,7 @@ def compose_link(grpc_link : Link) -> Dict: ] total_capacity_gbps, used_capacity_gbps = None, None + forwarding_direction = LinkForwardingDirection.UNIDIRECTIONAL.value if grpc_link.HasField('attributes'): attributes = grpc_link.attributes # In proto3, HasField() does not work for scalar fields, using ListFields() instead. 
@@ -130,11 +131,15 @@ def compose_link(grpc_link : Link) -> Dict: elif total_capacity_gbps is not None: used_capacity_gbps = total_capacity_gbps + if 'is_bidirectional' in attribute_names: + is_bidirectional = attributes.is_bidirectional + if is_bidirectional: + forwarding_direction = LinkForwardingDirection.BIDIRECTIONAL.value + if total_capacity_gbps is None: total_capacity_gbps = 100000 if used_capacity_gbps is None: used_capacity_gbps = 0 available_capacity_gbps = total_capacity_gbps - used_capacity_gbps - forwarding_direction = LinkForwardingDirection.UNIDIRECTIONAL.value total_potential_capacity = compose_capacity(total_capacity_gbps, CapacityUnit.GBPS.value) available_capacity = compose_capacity(available_capacity_gbps, CapacityUnit.GBPS.value) cost_characteristics = compose_cost_characteristics('linkcost', '1', '0') -- GitLab From 75760b34109355001cda44121e01925ff1c73b34 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 16:34:41 +0000 Subject: [PATCH 346/367] Common - Tools - REST API - Server: - Fixed class constructor data types --- src/common/tools/rest_api/server/GenericRestServer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/tools/rest_api/server/GenericRestServer.py b/src/common/tools/rest_api/server/GenericRestServer.py index 17f629be1..0266ceb72 100644 --- a/src/common/tools/rest_api/server/GenericRestServer.py +++ b/src/common/tools/rest_api/server/GenericRestServer.py @@ -29,8 +29,8 @@ def log_request(logger, response): class GenericRestServer(threading.Thread): def __init__( - self, bind_port : Union[str, int], base_url : str, bind_address : Optional[str] = None, - cls_name : str = __name__ + self, bind_port : Union[str, int], base_url : Optional[str] = None, + bind_address : Optional[str] = None, cls_name : str = __name__ ) -> None: threading.Thread.__init__(self, daemon=True) self.logger = logging.getLogger(cls_name) -- GitLab From 865f3f099c7111245686fe3761b6e3ca63872b1f Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 16:34:52 +0000 Subject: [PATCH 347/367] Code cleanup --- src/ztp_server/service/rest_server/RestServer.py | 2 +- src/ztp_server/tests/PrepareTestScenario.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ztp_server/service/rest_server/RestServer.py b/src/ztp_server/service/rest_server/RestServer.py index 7b028e39d..9691dc861 100755 --- a/src/ztp_server/service/rest_server/RestServer.py +++ b/src/ztp_server/service/rest_server/RestServer.py @@ -20,4 +20,4 @@ class RestServer(GenericRestServer): def __init__(self, cls_name: str = __name__) -> None: bind_port = get_service_port_http(ServiceNameEnum.ZTP_SERVER) base_url = get_service_baseurl_http(ServiceNameEnum.ZTP_SERVER) - super().__init__(bind_port, base_url, cls_name=cls_name) + super().__init__(bind_port, base_url=base_url, cls_name=cls_name) diff --git a/src/ztp_server/tests/PrepareTestScenario.py b/src/ztp_server/tests/PrepareTestScenario.py index a722d743a..72389ee97 100644 --- a/src/ztp_server/tests/PrepareTestScenario.py +++ b/src/ztp_server/tests/PrepareTestScenario.py @@ -35,7 +35,7 @@ os.environ[get_env_var_name(ServiceNameEnum.ZTP_SERVER, ENVVAR_SUFIX_SERVICE_POR @pytest.fixture(scope='session') def ztp_server_application(): - _rest_server = GenericRestServer(ZTP_SERVICE_PORT, None, bind_address='127.0.0.1') + _rest_server = GenericRestServer(ZTP_SERVICE_PORT, bind_address='127.0.0.1') register_ztp_provisioning(_rest_server) _rest_server.start() time.sleep(1) # bring time for the server to start -- 
GitLab From 90cf1929b38bac061614dcf5e47785ec376fdc52 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 16:35:05 +0000 Subject: [PATCH 348/367] Device component - IETF ACTN Driver: - Fix unitary test for IETF ACTN --- src/device/tests/test_unitary_ietf_actn.py | 37 ++++++++++++++++------ 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/src/device/tests/test_unitary_ietf_actn.py b/src/device/tests/test_unitary_ietf_actn.py index 6e29a2c43..02d042770 100644 --- a/src/device/tests/test_unitary_ietf_actn.py +++ b/src/device/tests/test_unitary_ietf_actn.py @@ -41,7 +41,6 @@ DEVICE_PORT = 8080 DEVICE_USERNAME = 'admin' DEVICE_PASSWORD = 'admin' DEVICE_SCHEME = 'http' -DEVICE_BASE_URL = '/restconf/v2/data' DEVICE_TIMEOUT = 120 DEVICE_VERIFY_CERTS = False @@ -52,7 +51,6 @@ DEVICE_CONNECT_RULES = json_device_connect_rules(DEVICE_ADDRESS, DEVICE_PORT, { 'scheme' : DEVICE_SCHEME, 'username' : DEVICE_USERNAME, 'password' : DEVICE_PASSWORD, - 'base_url' : DEVICE_BASE_URL, 'timeout' : DEVICE_TIMEOUT, 'verify_certs': DEVICE_VERIFY_CERTS, }) @@ -69,23 +67,44 @@ LOGGER.setLevel(logging.DEBUG) def ietf_actn_sdn_ctrl( device_service : DeviceService, # pylint: disable=redefined-outer-name ) -> Flask: - _rest_server = GenericRestServer(DEVICE_PORT, DEVICE_BASE_URL, bind_address=DEVICE_ADDRESS) + _rest_server = GenericRestServer(DEVICE_PORT, bind_address=DEVICE_ADDRESS) _rest_server.app.config['DEBUG' ] = True _rest_server.app.config['ENV' ] = 'development' _rest_server.app.config['SERVER_NAME'] = '{:s}:{:d}'.format(DEVICE_ADDRESS, DEVICE_PORT) _rest_server.app.config['TESTING' ] = True + class HostMeta(Resource): + def get(self): + host_meta = {'links': [{'rel': 'restconf', 'href': '/restconf'}]} + return make_response(jsonify(host_meta), 200) + class Root(Resource): def get(self): return make_response(jsonify({}), 200) + class Network(Resource): + def get(self, network_id : str): + network_topology = {'ietf-network:network': []} + return make_response(jsonify(network_topology), 200) + + RESTCONF_V1 = '/restconf/data' + RESTCONF_V2 = '/restconf/v2/data' + add_rsrc = _rest_server.add_resource - add_rsrc(Root, '/', endpoint='test.root') - add_rsrc(Root, '/.well-known/host-meta', endpoint='test.wellknown.host') - add_rsrc(OsuTunnels, '/ietf-te:te/tunnels') - add_rsrc(OsuTunnel, '/ietf-te:te/tunnels/tunnel=""') - add_rsrc(EthServices, '/ietf-eth-tran-service:etht-svc') - add_rsrc(EthService, '/ietf-eth-tran-service:etht-svc/etht-svc-instances=""') + add_rsrc(HostMeta, '/.well-known/host-meta', endpoint='well-known.host-meta') + add_rsrc(Root, RESTCONF_V1 + '/', endpoint='restconf.v1.root') + add_rsrc(Root, RESTCONF_V2 + '/', endpoint='restconf.v2.root') + + RESTCONF_NETWORK = RESTCONF_V1 + '/ietf-network:networks/network=' + add_rsrc(Network, RESTCONF_NETWORK) + + RESTCONF_TE_TUNNELS = RESTCONF_V2 + '/ietf-te:te/tunnels' + add_rsrc(OsuTunnels, RESTCONF_TE_TUNNELS) + add_rsrc(OsuTunnel, RESTCONF_TE_TUNNELS + '/tunnel=') + + RESTCONF_ETHT_SERVICES = RESTCONF_V2 + '/ietf-eth-tran-service:etht-svc' + add_rsrc(EthServices, RESTCONF_ETHT_SERVICES) + add_rsrc(EthService, RESTCONF_ETHT_SERVICES + '/etht-svc-instances=') _rest_server.start() time.sleep(1) # bring time for the server to start -- GitLab From 0f277da3a077df50f53b246404590266b442f12a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 16:52:01 +0000 Subject: [PATCH 349/367] PathComp Frontend: - Fix unit test K-Disjoint paths --- .../frontend/service/algorithms/KDisjointPathAlgorithm.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py index e1da3db2c..d2d4c3982 100644 --- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py @@ -126,7 +126,7 @@ class KDisjointPathAlgorithm(_Algorithm): return new_link_list def execute(self, dump_request_filename: Optional[str] = None, dump_reply_filename: Optional[str] = None) -> None: - algorithm = KShortestPathAlgorithm(Algorithm_KShortestPath(k_inspection=0, k_return=1)) + algorithm = KShortestPathAlgorithm(Algorithm_KShortestPath(k_inspection=10, k_return=1)) algorithm.sync_paths = True algorithm.device_list = self.device_list algorithm.device_name_mapping = self.device_name_mapping -- GitLab From c62fe1bf338be039fc7acac83cc3cfadeac9079b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 17:08:45 +0000 Subject: [PATCH 350/367] PathComp Frontend: - Fix unit test K-Disjoint paths --- .../frontend/service/algorithms/KDisjointPathAlgorithm.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py index d2d4c3982..68277a65f 100644 --- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py @@ -126,13 +126,15 @@ class KDisjointPathAlgorithm(_Algorithm): return new_link_list def execute(self, dump_request_filename: Optional[str] = None, dump_reply_filename: Optional[str] = None) -> None: - algorithm = KShortestPathAlgorithm(Algorithm_KShortestPath(k_inspection=10, k_return=1)) + algorithm = KShortestPathAlgorithm(Algorithm_KShortestPath(k_inspection=0, k_return=1)) algorithm.sync_paths = True algorithm.device_list = self.device_list algorithm.device_name_mapping = self.device_name_mapping + algorithm.device_uuid_mapping = self.device_uuid_mapping algorithm.device_dict = self.device_dict algorithm.endpoint_dict = self.endpoint_dict algorithm.endpoint_name_mapping = self.endpoint_name_mapping + algorithm.endpoint_uuid_mapping = self.endpoint_uuid_mapping algorithm.link_list = self.link_list algorithm.link_dict = self.link_dict algorithm.endpoint_to_link_dict = self.endpoint_to_link_dict -- GitLab From adb8ab4121402b661caa9ec574a856ab4b6288ef Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 9 Oct 2025 17:11:50 +0000 Subject: [PATCH 351/367] PathComp Frontend: - Fix unit test K-Disjoint paths --- .../frontend/service/algorithms/KDisjointPathAlgorithm.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py index 68277a65f..00dbea5be 100644 --- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py @@ -63,9 +63,9 @@ class KDisjointPathAlgorithm(_Algorithm): elif kind == 'endpoint_location': endpoint_id = constraint.endpoint_location.endpoint_id device_uuid = endpoint_id.device_id.device_uuid.uuid - device_uuid = self.device_name_mapping.get(device_uuid, device_uuid) + device_uuid = self.device_uuid_mapping.get(device_uuid, device_uuid) endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_uuid = self.endpoint_name_mapping.get((device_uuid, 
endpoint_uuid), endpoint_uuid) + endpoint_uuid = self.endpoint_uuid_mapping.get((device_uuid, endpoint_uuid), endpoint_uuid) location_kind = constraint.endpoint_location.location.WhichOneof('location') if location_kind != 'region': MSG = 'Unsupported LocationType({:s}) in Constraint({:s})' @@ -76,9 +76,9 @@ class KDisjointPathAlgorithm(_Algorithm): elif kind == 'endpoint_priority': endpoint_id = constraint.endpoint_priority.endpoint_id device_uuid = endpoint_id.device_id.device_uuid.uuid - device_uuid = self.device_name_mapping.get(device_uuid, device_uuid) + device_uuid = self.device_uuid_mapping.get(device_uuid, device_uuid) endpoint_uuid = endpoint_id.endpoint_uuid.uuid - endpoint_uuid = self.endpoint_name_mapping.get((device_uuid, endpoint_uuid), endpoint_uuid) + endpoint_uuid = self.endpoint_uuid_mapping.get((device_uuid, endpoint_uuid), endpoint_uuid) priority = constraint.endpoint_priority.priority endpoints.setdefault((device_uuid, endpoint_uuid), dict())['priority'] = priority -- GitLab From b4a88674b87cb4d25a4e1894779deff6e139e6e3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 10 Oct 2025 07:34:43 +0000 Subject: [PATCH 352/367] NBI component: - Fixing unit test issue --- src/nbi/.gitlab-ci.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/nbi/.gitlab-ci.yml b/src/nbi/.gitlab-ci.yml index 1a4bfe01b..8ab485aeb 100644 --- a/src/nbi/.gitlab-ci.yml +++ b/src/nbi/.gitlab-ci.yml @@ -104,7 +104,11 @@ unit_test nbi: --env IETF_NETWORK_RENDERER=LIBYANG --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - - while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do sleep 1; done + - > + while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do + sleep 5 + docker logs $IMAGE_NAME + done - sleep 5 # Give extra time to NBI to get ready - docker ps -a - docker logs kafka -- GitLab From 33440d59ab6b6acc490a205c09f534530b891f14 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 10 Oct 2025 09:26:36 +0000 Subject: [PATCH 353/367] Test - ECOC22: - Corrected topology descriptor --- src/tests/ecoc22/descriptors_emulated.json | 64 +++++++++++----------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/src/tests/ecoc22/descriptors_emulated.json b/src/tests/ecoc22/descriptors_emulated.json index da36a6ae9..efff90438 100644 --- a/src/tests/ecoc22/descriptors_emulated.json +++ b/src/tests/ecoc22/descriptors_emulated.json @@ -7,83 +7,83 @@ ], "devices": [ { - "device_id": {"device_uuid": {"uuid": "DC1-GW"}}, "device_type": "emu-datacenter", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "DC1-GW"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "copper", "uuid": "eth1"}, - {"sample_types": [], "type": "copper", "uuid": "eth2"}, - {"sample_types": [], "type": "copper", "uuid": "int"} + {"uuid": "eth1", "type": "copper"}, + {"uuid": "eth2", "type": "copper"}, + {"uuid": "int", "type": "copper"} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "DC2-GW"}}, "device_type": "emu-datacenter", "device_drivers": 
[0], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "DC2-GW"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "copper", "uuid": "eth1"}, - {"sample_types": [], "type": "copper", "uuid": "eth2"}, - {"sample_types": [], "type": "copper", "uuid": "int"} + {"uuid": "eth1", "type": "copper"}, + {"uuid": "eth2", "type": "copper"}, + {"uuid": "int", "type": "copper"} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "CS1-GW1"}}, "device_type": "emu-packet-router", "device_drivers": [1], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "CS1-GW1"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "10/1"}, - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"} + {"uuid": "10/1", "type": "copper", "sample_types": [101, 102, 201, 202]}, + {"uuid": "1/1", "type": "copper", "sample_types": [101, 102, 201, 202]} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "CS1-GW2"}}, "device_type": "emu-packet-router", "device_drivers": [1], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "CS1-GW2"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "10/1"}, - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"} + {"uuid": "10/1", "type": "copper", "sample_types": [101, 102, 201, 202]}, + {"uuid": "1/1", "type": "copper", "sample_types": [101, 102, 201, 202]} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "CS2-GW1"}}, "device_type": "emu-packet-router", "device_drivers": [1], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "CS2-GW1"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "10/1"}, - {"sample_types": [101, 102, 201, 202], "type": "copper", 
"uuid": "1/1"} + {"uuid": "10/1", "type": "copper", "sample_types": [101, 102, 201, 202]}, + {"uuid": "1/1", "type": "copper", "sample_types": [101, 102, 201, 202]} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "CS2-GW2"}}, "device_type": "emu-packet-router", "device_drivers": [1], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "CS2-GW2"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "10/1"}, - {"sample_types": [101, 102, 201, 202], "type": "copper", "uuid": "1/1"} + {"uuid": "10/1", "type": "copper", "sample_types": [101, 102, 201, 202]}, + {"uuid": "1/1", "type": "copper", "sample_types": [101, 102, 201, 202]} ]}}} ]} }, { - "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "emu-open-line-system", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 1, "device_config": {"config_rules": [ + "device_id": {"device_uuid": {"uuid": "OLS"}}, "device_type": "emu-open-line-system", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "optical", "uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870"}, - {"sample_types": [], "type": "optical", "uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418"}, - {"sample_types": [], "type": "optical", "uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513"}, - {"sample_types": [], "type": "optical", "uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec"} + {"uuid": "aade6001-f00b-5e2f-a357-6a0a9d3de870", "type": "optical"}, + {"uuid": "eb287d83-f05e-53ec-ab5a-adf6bd2b5418", "type": "optical"}, + {"uuid": "0ef74f99-1acc-57bd-ab9d-4b958b06c513", "type": "optical"}, + {"uuid": "50296d99-58cc-5ce7-82f5-fc8ee4eec2ec", "type": "optical"} ]}}} ]} } -- GitLab From c0f36096b48db624eee36da7e7a62c20f0704310 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 10 Oct 2025 10:28:25 +0000 Subject: [PATCH 354/367] NBI component: - Fixing unit test issue --- src/nbi/.gitlab-ci.yml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/src/nbi/.gitlab-ci.yml b/src/nbi/.gitlab-ci.yml index 8ab485aeb..5a7c8cef9 100644 --- a/src/nbi/.gitlab-ci.yml +++ b/src/nbi/.gitlab-ci.yml @@ -86,7 +86,6 @@ unit_test nbi: bitnamilegacy/kafka:latest - while ! docker logs kafka 2>&1 | grep -q 'Kafka Server started'; do sleep 1; done - sleep 5 # Give extra time to Kafka to get stabilized - - docker inspect kafka --format "{{.NetworkSettings.Networks}}" - KAFKA_IP=$(docker inspect kafka --format "{{.NetworkSettings.Networks.teraflowbridge.IPAddress}}") - echo $KAFKA_IP - > @@ -104,11 +103,7 @@ unit_test nbi: --env IETF_NETWORK_RENDERER=LIBYANG --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - - > - while ! 
docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do - sleep 5 - docker logs $IMAGE_NAME - done + - while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do sleep 5; done - sleep 5 # Give extra time to NBI to get ready - docker ps -a - docker logs kafka -- GitLab From b8ccc35c9efa85193151650abb42acd8d4763a21 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 10 Oct 2025 10:54:26 +0000 Subject: [PATCH 355/367] NBI component: - Fixing unit test issue --- src/nbi/.gitlab-ci.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/nbi/.gitlab-ci.yml b/src/nbi/.gitlab-ci.yml index 5a7c8cef9..ef384d67a 100644 --- a/src/nbi/.gitlab-ci.yml +++ b/src/nbi/.gitlab-ci.yml @@ -103,7 +103,13 @@ unit_test nbi: --env IETF_NETWORK_RENDERER=LIBYANG --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - - while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do sleep 5; done + # Old version that gets blocked: + #- while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do sleep 5; done + # Wait until any worker logs "Initialization completed" (from the start of logs) + # -m1 makes grep exit as soon as the line appears. + # With set -o pipefail, docker logs will get SIGPIPE when grep exits; + # `|| true` neutralizes that so the pipeline’s status reflects grep’s success. + - (docker logs -f nbi || true) 2>&1 | grep -m1 -Fi 'Initialization completed' - sleep 5 # Give extra time to NBI to get ready - docker ps -a - docker logs kafka -- GitLab From d6a50486b150e6946ae0f8f7f48e17b23100c4fc Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 10 Oct 2025 11:02:08 +0000 Subject: [PATCH 356/367] NBI component: - Fixed loop in unitary tests and scripts that waits for NBI component workers to be started --- scripts/run_tests_locally-nbi-all.sh | 10 ++++++---- src/nbi/.gitlab-ci.yml | 4 +--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/scripts/run_tests_locally-nbi-all.sh b/scripts/run_tests_locally-nbi-all.sh index 35c755b38..83e2b02e2 100755 --- a/scripts/run_tests_locally-nbi-all.sh +++ b/scripts/run_tests_locally-nbi-all.sh @@ -59,10 +59,12 @@ docker run --name nbi -d \ --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" \ nbi:latest -while ! docker logs nbi 2>&1 | grep -q 'Initialization completed'; do - printf "." - sleep 1; -done +# Wait until any worker logs "Initialization completed" (from the start of logs) +# -m1 makes grep exit as soon as the line appears. +# With set -o pipefail, docker logs will get SIGPIPE when grep exits; +# `|| true` neutralizes that so the pipeline’s status reflects grep’s success. +(docker logs -f $IMAGE_NAME || true) 2>&1 | grep -m1 -Fi 'Initialization completed' + printf "\n" sleep 5 # Give extra time to NBI to get ready diff --git a/src/nbi/.gitlab-ci.yml b/src/nbi/.gitlab-ci.yml index ef384d67a..0a5810354 100644 --- a/src/nbi/.gitlab-ci.yml +++ b/src/nbi/.gitlab-ci.yml @@ -103,13 +103,11 @@ unit_test nbi: --env IETF_NETWORK_RENDERER=LIBYANG --env "KFK_SERVER_ADDRESS=${KAFKA_IP}:9092" $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - # Old version that gets blocked: - #- while ! docker logs $IMAGE_NAME 2>&1 | grep -q 'Initialization completed'; do sleep 5; done # Wait until any worker logs "Initialization completed" (from the start of logs) # -m1 makes grep exit as soon as the line appears. # With set -o pipefail, docker logs will get SIGPIPE when grep exits; # `|| true` neutralizes that so the pipeline’s status reflects grep’s success. 
- - (docker logs -f nbi || true) 2>&1 | grep -m1 -Fi 'Initialization completed' + - (docker logs -f $IMAGE_NAME || true) 2>&1 | grep -m1 -Fi 'Initialization completed' - sleep 5 # Give extra time to NBI to get ready - docker ps -a - docker logs kafka -- GitLab From de1720490623044cf952dddb8730905686fc53c0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 08:31:25 +0000 Subject: [PATCH 357/367] Test - QKD E2E: - Corrected QKD Node deployment - Corrected topology descriptor - Deactivated rest of CI/CD tests --- .gitlab-ci.yml | 74 +++++++++---------- src/tests/.gitlab-ci.yml | 28 +++---- .../qkd_end2end/data/tfs-01-topology.json | 12 +-- src/tests/qkd_end2end/redeploy-qkd-nodes.sh | 6 +- 4 files changed, 60 insertions(+), 60 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2856f9fed..9bb85b49e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,43 +21,43 @@ stages: # include the individual .gitlab-ci.yml of each micro-service and tests include: - #- local: '/manifests/.gitlab-ci.yml' - - local: '/src/monitoring/.gitlab-ci.yml' - - local: '/src/nbi/.gitlab-ci.yml' - - local: '/src/context/.gitlab-ci.yml' - - local: '/src/device/.gitlab-ci.yml' - - local: '/src/service/.gitlab-ci.yml' - - local: '/src/qkd_app/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalattackdetector/.gitlab-ci.yml' - - local: '/src/opticalattackmanager/.gitlab-ci.yml' - - local: '/src/opticalcontroller/.gitlab-ci.yml' - - local: '/src/ztp/.gitlab-ci.yml' - - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/automation/.gitlab-ci.yml' - - local: '/src/forecaster/.gitlab-ci.yml' - #- local: '/src/webui/.gitlab-ci.yml' - #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' - #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' - - local: '/src/slice/.gitlab-ci.yml' - #- local: '/src/interdomain/.gitlab-ci.yml' - - local: '/src/pathcomp/.gitlab-ci.yml' - #- local: '/src/dlt/.gitlab-ci.yml' - - local: '/src/load_generator/.gitlab-ci.yml' - - local: '/src/bgpls_speaker/.gitlab-ci.yml' - - local: '/src/kpi_manager/.gitlab-ci.yml' - - local: '/src/kpi_value_api/.gitlab-ci.yml' - #- local: '/src/kpi_value_writer/.gitlab-ci.yml' - #- local: '/src/telemetry/.gitlab-ci.yml' - - local: '/src/analytics/.gitlab-ci.yml' - - local: '/src/qos_profile/.gitlab-ci.yml' - - local: '/src/vnt_manager/.gitlab-ci.yml' - - local: '/src/e2e_orchestrator/.gitlab-ci.yml' - - local: '/src/ztp_server/.gitlab-ci.yml' - - local: '/src/osm_client/.gitlab-ci.yml' - - local: '/src/simap_connector/.gitlab-ci.yml' +# #- local: '/manifests/.gitlab-ci.yml' +# - local: '/src/monitoring/.gitlab-ci.yml' +# - local: '/src/nbi/.gitlab-ci.yml' +# - local: '/src/context/.gitlab-ci.yml' +# - local: '/src/device/.gitlab-ci.yml' +# - local: '/src/service/.gitlab-ci.yml' +# - local: '/src/qkd_app/.gitlab-ci.yml' +# - local: '/src/dbscanserving/.gitlab-ci.yml' +# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' +# - local: '/src/opticalattackdetector/.gitlab-ci.yml' +# - local: '/src/opticalattackmanager/.gitlab-ci.yml' +# - local: '/src/opticalcontroller/.gitlab-ci.yml' +# - local: '/src/ztp/.gitlab-ci.yml' +# - local: '/src/policy/.gitlab-ci.yml' +# - local: '/src/automation/.gitlab-ci.yml' +# - local: '/src/forecaster/.gitlab-ci.yml' +# #- local: '/src/webui/.gitlab-ci.yml' +# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' +# #- local: 
'/src/l3_centralizedattackdetector/.gitlab-ci.yml' +# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' +# - local: '/src/slice/.gitlab-ci.yml' +# #- local: '/src/interdomain/.gitlab-ci.yml' +# - local: '/src/pathcomp/.gitlab-ci.yml' +# #- local: '/src/dlt/.gitlab-ci.yml' +# - local: '/src/load_generator/.gitlab-ci.yml' +# - local: '/src/bgpls_speaker/.gitlab-ci.yml' +# - local: '/src/kpi_manager/.gitlab-ci.yml' +# - local: '/src/kpi_value_api/.gitlab-ci.yml' +# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' +# #- local: '/src/telemetry/.gitlab-ci.yml' +# - local: '/src/analytics/.gitlab-ci.yml' +# - local: '/src/qos_profile/.gitlab-ci.yml' +# - local: '/src/vnt_manager/.gitlab-ci.yml' +# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' +# - local: '/src/ztp_server/.gitlab-ci.yml' +# - local: '/src/osm_client/.gitlab-ci.yml' +# - local: '/src/simap_connector/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 9b256f1ae..dfa24abea 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,20 +14,20 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: - - local: '/src/tests/ofc22/.gitlab-ci.yml' - #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' - - local: '/src/tests/ecoc22/.gitlab-ci.yml' - #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' - #- local: '/src/tests/ofc23/.gitlab-ci.yml' - - local: '/src/tests/ofc24/.gitlab-ci.yml' - - local: '/src/tests/eucnc24/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' - #- local: '/src/tests/ofc25/.gitlab-ci.yml' - #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' +# - local: '/src/tests/ofc22/.gitlab-ci.yml' +# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' +# - local: '/src/tests/ecoc22/.gitlab-ci.yml' +# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' +# #- local: '/src/tests/ofc23/.gitlab-ci.yml' +# - local: '/src/tests/ofc24/.gitlab-ci.yml' +# - local: '/src/tests/eucnc24/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' +# #- local: '/src/tests/ofc25/.gitlab-ci.yml' +# #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' - - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' - - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' +# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' +# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' diff --git a/src/tests/qkd_end2end/data/tfs-01-topology.json b/src/tests/qkd_end2end/data/tfs-01-topology.json index fe8aa367c..48b615931 100644 --- a/src/tests/qkd_end2end/data/tfs-01-topology.json +++ b/src/tests/qkd_end2end/data/tfs-01-topology.json @@ -9,24 +9,24 @@ { "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.101"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": 
"CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8881"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.102"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8882"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.103"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8883"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} } diff --git a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh index b01e0ecb8..21da5198f 100755 --- a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh +++ b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh @@ -22,13 +22,13 @@ docker network rm --force qkd-node-br docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br # Create QKD Nodes -docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 \ +docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8881:8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 \ +docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8882:8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 \ +docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8883:8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -- GitLab From 96c19e4a4fc692097cdb62708e9093bdca57622b Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 09:16:48 +0000 Subject: [PATCH 358/367] Test - QKD E2E: - Corrected QKD Node deployment - Corrected topology descriptor --- src/tests/qkd_end2end/data/tfs-01-topology.json | 12 ++++++------ src/tests/qkd_end2end/redeploy-qkd-nodes.sh | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git 
a/src/tests/qkd_end2end/data/tfs-01-topology.json b/src/tests/qkd_end2end/data/tfs-01-topology.json index 48b615931..fe8aa367c 100644 --- a/src/tests/qkd_end2end/data/tfs-01-topology.json +++ b/src/tests/qkd_end2end/data/tfs-01-topology.json @@ -9,24 +9,24 @@ { "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8881"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.101"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8882"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.102"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8883"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.103"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} } diff --git a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh index 21da5198f..b01e0ecb8 100755 --- a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh +++ b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh @@ -22,13 +22,13 @@ docker network rm --force qkd-node-br docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br # Create QKD Nodes -docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8881:8080 \ +docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8882:8080 \ +docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 
8883:8080 \ +docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -- GitLab From 79b9ab871047947e05c62f15c0efcbe986bb6fb0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 09:44:19 +0000 Subject: [PATCH 359/367] Test - QKD E2E: - Enabled debug-level logging --- src/tests/qkd_end2end/.gitlab-ci.yml | 2 +- src/tests/qkd_end2end/scripts/run-01-onboarding.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tests/qkd_end2end/.gitlab-ci.yml b/src/tests/qkd_end2end/.gitlab-ci.yml index 8445a8da5..959609b9c 100644 --- a/src/tests/qkd_end2end/.gitlab-ci.yml +++ b/src/tests/qkd_end2end/.gitlab-ci.yml @@ -168,7 +168,7 @@ end2end_test qkd_end2end: # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml - #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml diff --git a/src/tests/qkd_end2end/scripts/run-01-onboarding.sh b/src/tests/qkd_end2end/scripts/run-01-onboarding.sh index df9186204..8138c7ed3 100755 --- a/src/tests/qkd_end2end/scripts/run-01-onboarding.sh +++ b/src/tests/qkd_end2end/scripts/run-01-onboarding.sh @@ -15,6 +15,6 @@ source /var/teraflow/tfs_runtime_env_vars.sh export PYTHONPATH=/var/teraflow -pytest --verbose --log-level=INFO \ +pytest --verbose --log-level=DEBUG -o log_cli=true --verbose \ --junitxml=/opt/results/report_onboarding.xml \ /var/teraflow/tests/qkd_end2end/tests/test_01_onboarding.py -- GitLab From 56ff1c9ab86cab7cab7519f9d88050c16e78b8bf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 11:10:17 +0000 Subject: [PATCH 360/367] Test - QKD E2E: - Fixing CI/CD test --- src/tests/qkd_end2end/data/tfs-01-topology.json | 12 ++++++------ src/tests/qkd_end2end/redeploy-qkd-nodes.sh | 6 +++--- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/tests/qkd_end2end/data/tfs-01-topology.json b/src/tests/qkd_end2end/data/tfs-01-topology.json index fe8aa367c..801339b9a 100644 --- a/src/tests/qkd_end2end/data/tfs-01-topology.json +++ b/src/tests/qkd_end2end/data/tfs-01-topology.json @@ -9,24 +9,24 @@ { "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.101"}}, - {"action": "CONFIGACTION_SET", 
"custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8881"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.102"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.103"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8883"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} } diff --git a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh index b01e0ecb8..21da5198f 100755 --- a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh +++ b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh @@ -22,13 +22,13 @@ docker network rm --force qkd-node-br docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br # Create QKD Nodes -docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 \ +docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8881:8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 \ +docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8882:8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 \ +docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8883:8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -- GitLab From 54bedac9f4596278f1c6c4e4f24745f84535d1e4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 13:15:38 +0000 Subject: [PATCH 361/367] Test - QKD E2E: - Fixing CI/CD test --- src/tests/qkd_end2end/data/tfs-01-topology.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/src/tests/qkd_end2end/data/tfs-01-topology.json b/src/tests/qkd_end2end/data/tfs-01-topology.json index 801339b9a..7b282922f 100644 --- a/src/tests/qkd_end2end/data/tfs-01-topology.json +++ b/src/tests/qkd_end2end/data/tfs-01-topology.json @@ -9,7 +9,7 @@ { "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.101"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8881"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} @@ -17,7 +17,7 @@ { "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.102"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} @@ -25,7 +25,7 @@ { "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.254"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.103"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8883"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} -- GitLab From 77cf26a8c0c3607543246c6bc672be159f594e17 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 13:59:13 +0000 Subject: [PATCH 362/367] Test - QKD E2E: - Fixing CI/CD test --- src/tests/qkd_end2end/.gitlab-ci.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/tests/qkd_end2end/.gitlab-ci.yml b/src/tests/qkd_end2end/.gitlab-ci.yml index 959609b9c..0e85b01a2 100644 --- a/src/tests/qkd_end2end/.gitlab-ci.yml +++ b/src/tests/qkd_end2end/.gitlab-ci.yml @@ -104,18 +104,20 @@ end2end_test qkd_end2end: # Deploy scenario with mock QKD Nodes - docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br - > - docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 + docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8881:8080 --volume "$PWD/src/tests/${TEST_NAME}/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" ${CI_REGISTRY_IMAGE}/mock-qkd-node:test - > - docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 + docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8882:8080 --volume "$PWD/src/tests/${TEST_NAME}/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" ${CI_REGISTRY_IMAGE}/mock-qkd-node:test - > - docker run --detach --name qkd-node-03 --network qkd-node-br 
--ip 172.254.250.103 + docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8883:8080 --volume "$PWD/src/tests/${TEST_NAME}/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" ${CI_REGISTRY_IMAGE}/mock-qkd-node:test + - docker ps -a + - echo "Waiting for QKD Nodes to initialize..." - > while ! docker logs qkd-node-01 2>&1 | grep -q "All log messages before absl::InitializeLog() is called are written to STDERR"; do @@ -134,6 +136,7 @@ end2end_test qkd_end2end: done # Dump logs of the QKD Nodes (script, before any configuration) + - docker ps -a - docker logs qkd-node-01 - docker logs qkd-node-02 - docker logs qkd-node-03 -- GitLab From 8faf3244e892e19d9ea4987a5875f1a685c42dad Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 13 Oct 2025 16:42:06 +0000 Subject: [PATCH 363/367] Test - QKD E2E: - Fixing CI/CD test --- src/tests/qkd_end2end/.gitlab-ci.yml | 6 +++--- src/tests/qkd_end2end/data/tfs-01-topology.json | 6 +++--- src/tests/qkd_end2end/redeploy-all.sh | 6 +++--- src/tests/qkd_end2end/redeploy-qkd-nodes.sh | 6 +++--- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/src/tests/qkd_end2end/.gitlab-ci.yml b/src/tests/qkd_end2end/.gitlab-ci.yml index 0e85b01a2..9c59ed220 100644 --- a/src/tests/qkd_end2end/.gitlab-ci.yml +++ b/src/tests/qkd_end2end/.gitlab-ci.yml @@ -104,15 +104,15 @@ end2end_test qkd_end2end: # Deploy scenario with mock QKD Nodes - docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br - > - docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8881:8080 + docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8080 --volume "$PWD/src/tests/${TEST_NAME}/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" ${CI_REGISTRY_IMAGE}/mock-qkd-node:test - > - docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8882:8080 + docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8080 --volume "$PWD/src/tests/${TEST_NAME}/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" ${CI_REGISTRY_IMAGE}/mock-qkd-node:test - > - docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8883:8080 + docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8080 --volume "$PWD/src/tests/${TEST_NAME}/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" ${CI_REGISTRY_IMAGE}/mock-qkd-node:test diff --git a/src/tests/qkd_end2end/data/tfs-01-topology.json b/src/tests/qkd_end2end/data/tfs-01-topology.json index 7b282922f..fe8aa367c 100644 --- a/src/tests/qkd_end2end/data/tfs-01-topology.json +++ b/src/tests/qkd_end2end/data/tfs-01-topology.json @@ -10,7 +10,7 @@ "device_id": {"device_uuid": {"uuid": "QKD1"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.101"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8881"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, @@ -18,7 +18,7 @@ "device_id": {"device_uuid": {"uuid": "QKD2"}}, "device_type": "qkd-node", "device_drivers": 
["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.102"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} }, @@ -26,7 +26,7 @@ "device_id": {"device_uuid": {"uuid": "QKD3"}}, "device_type": "qkd-node", "device_drivers": ["DEVICEDRIVER_QKD"], "device_config": {"config_rules": [ {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.254.250.103"}}, - {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8883"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"scheme": "http"}}} ]} } diff --git a/src/tests/qkd_end2end/redeploy-all.sh b/src/tests/qkd_end2end/redeploy-all.sh index b534e2465..95fb27c42 100755 --- a/src/tests/qkd_end2end/redeploy-all.sh +++ b/src/tests/qkd_end2end/redeploy-all.sh @@ -22,13 +22,13 @@ docker network rm --force qkd-node-br docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br # Create QKD Nodes -docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 \ +docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 \ +docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 \ +docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test diff --git a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh index 21da5198f..23ac3d7f1 100755 --- a/src/tests/qkd_end2end/redeploy-qkd-nodes.sh +++ b/src/tests/qkd_end2end/redeploy-qkd-nodes.sh @@ -22,13 +22,13 @@ docker network rm --force qkd-node-br docker network create --driver bridge --subnet=172.254.250.0/24 --gateway=172.254.250.254 qkd-node-br # Create QKD Nodes -docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8881:8080 \ +docker run --detach --name qkd-node-01 --network qkd-node-br --ip 172.254.250.101 --publish 8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-01.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8882:8080 \ +docker run --detach --name qkd-node-02 --network qkd-node-br --ip 172.254.250.102 --publish 8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-02.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 
--publish 8883:8080 \ +docker run --detach --name qkd-node-03 --network qkd-node-br --ip 172.254.250.103 --publish 8080 \ --volume "$PWD/src/tests/qkd_end2end/data/qkd-node-03.json:/var/mock_qkd_node/startup.json" \ mock-qkd-node:test -- GitLab From b4612a6691f3bcf4aed65e6b366d867d864585bf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 14 Oct 2025 07:49:48 +0000 Subject: [PATCH 364/367] Test - OFC24: - Fixing CI/CD test --- src/tests/.gitlab-ci.yml | 2 +- src/tests/ofc24/.gitlab-ci.yml | 8 ++++---- src/tests/ofc24/deploy-node-agents.sh | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index dfa24abea..196f39566 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -19,7 +19,7 @@ include: # - local: '/src/tests/ecoc22/.gitlab-ci.yml' # #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' # #- local: '/src/tests/ofc23/.gitlab-ci.yml' -# - local: '/src/tests/ofc24/.gitlab-ci.yml' + - local: '/src/tests/ofc24/.gitlab-ci.yml' # - local: '/src/tests/eucnc24/.gitlab-ci.yml' # #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' # #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' diff --git a/src/tests/ofc24/.gitlab-ci.yml b/src/tests/ofc24/.gitlab-ci.yml index eda880ae7..832ae8305 100644 --- a/src/tests/ofc24/.gitlab-ci.yml +++ b/src/tests/ofc24/.gitlab-ci.yml @@ -123,22 +123,22 @@ end2end_test ofc24: docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 --ip-range=172.254.253.0/24 na-br - > - docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 + docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 --publish 2022 --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t1.xml:/confd/examples.confd/OC23/platform.xml" asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh - > - docker run -dit --init --name na-t2 --network=na-br --ip 172.254.253.102 + docker run -dit --init --name na-t2 --network=na-br --ip 172.254.253.102 --publish 2022 --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t2.xml:/confd/examples.confd/OC23/platform.xml" asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh - > - docker run -dit --init --name na-r1 --network=na-br --ip 172.254.253.201 + docker run -dit --init --name na-r1 --network=na-br --ip 172.254.253.201 --publish 2022 --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r1.xml:/confd/examples.confd/OC23/platform.xml" asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh - > - docker run -dit --init --name na-r2 --network=na-br --ip 172.254.253.202 + docker run -dit --init --name na-r2 --network=na-br --ip 172.254.253.202 --publish 2022 --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r2.xml:/confd/examples.confd/OC23/platform.xml" asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh diff --git 
a/src/tests/ofc24/deploy-node-agents.sh b/src/tests/ofc24/deploy-node-agents.sh index 7dec352ac..18b1603a3 100755 --- a/src/tests/ofc24/deploy-node-agents.sh +++ b/src/tests/ofc24/deploy-node-agents.sh @@ -34,19 +34,19 @@ echo echo "Create Management Network and Node Agents:" echo "------------------------------------------" docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 --ip-range=172.254.253.0/24 na-br -docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 \ +docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 --publish 2022 \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t1.xml:/confd/examples.confd/OC23/platform.xml" \ asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh -docker run -dit --init --name na-t2 --network=na-br --ip 172.254.253.102 \ +docker run -dit --init --name na-t2 --network=na-br --ip 172.254.253.102 --publish 2022 \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t2.xml:/confd/examples.confd/OC23/platform.xml" \ asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh -docker run -dit --init --name na-r1 --network=na-br --ip 172.254.253.201 \ +docker run -dit --init --name na-r1 --network=na-br --ip 172.254.253.201 --publish 2022 \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r1.xml:/confd/examples.confd/OC23/platform.xml" \ asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh -docker run -dit --init --name na-r2 --network=na-br --ip 172.254.253.202 \ +docker run -dit --init --name na-r2 --network=na-br --ip 172.254.253.202 --publish 2022 \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-mg-on.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" \ --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_r2.xml:/confd/examples.confd/OC23/platform.xml" \ asgamb1/flexscale-node.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh -- GitLab From db71d94624293caf2b12fd2103de406aac726cd0 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 14 Oct 2025 08:09:36 +0000 Subject: [PATCH 365/367] Code cleanup --- .gitlab-ci.yml | 74 +++++++++---------- src/tests/.gitlab-ci.yml | 26 +++---- src/tests/qkd_end2end/.gitlab-ci.yml | 2 +- .../qkd_end2end/scripts/run-01-onboarding.sh | 2 +- 4 files changed, 52 insertions(+), 52 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9bb85b49e..2856f9fed 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,43 +21,43 @@ stages: # include the individual .gitlab-ci.yml of each micro-service and tests include: -# #- local: '/manifests/.gitlab-ci.yml' -# - local: '/src/monitoring/.gitlab-ci.yml' -# - local: '/src/nbi/.gitlab-ci.yml' -# - local: '/src/context/.gitlab-ci.yml' -# - local: '/src/device/.gitlab-ci.yml' -# - local: '/src/service/.gitlab-ci.yml' -# - local: '/src/qkd_app/.gitlab-ci.yml' -# - local: '/src/dbscanserving/.gitlab-ci.yml' -# - local: '/src/opticalattackmitigator/.gitlab-ci.yml' -# - local: '/src/opticalattackdetector/.gitlab-ci.yml' -# - local: 
'/src/opticalattackmanager/.gitlab-ci.yml' -# - local: '/src/opticalcontroller/.gitlab-ci.yml' -# - local: '/src/ztp/.gitlab-ci.yml' -# - local: '/src/policy/.gitlab-ci.yml' -# - local: '/src/automation/.gitlab-ci.yml' -# - local: '/src/forecaster/.gitlab-ci.yml' -# #- local: '/src/webui/.gitlab-ci.yml' -# #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' -# #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' -# - local: '/src/slice/.gitlab-ci.yml' -# #- local: '/src/interdomain/.gitlab-ci.yml' -# - local: '/src/pathcomp/.gitlab-ci.yml' -# #- local: '/src/dlt/.gitlab-ci.yml' -# - local: '/src/load_generator/.gitlab-ci.yml' -# - local: '/src/bgpls_speaker/.gitlab-ci.yml' -# - local: '/src/kpi_manager/.gitlab-ci.yml' -# - local: '/src/kpi_value_api/.gitlab-ci.yml' -# #- local: '/src/kpi_value_writer/.gitlab-ci.yml' -# #- local: '/src/telemetry/.gitlab-ci.yml' -# - local: '/src/analytics/.gitlab-ci.yml' -# - local: '/src/qos_profile/.gitlab-ci.yml' -# - local: '/src/vnt_manager/.gitlab-ci.yml' -# - local: '/src/e2e_orchestrator/.gitlab-ci.yml' -# - local: '/src/ztp_server/.gitlab-ci.yml' -# - local: '/src/osm_client/.gitlab-ci.yml' -# - local: '/src/simap_connector/.gitlab-ci.yml' + #- local: '/manifests/.gitlab-ci.yml' + - local: '/src/monitoring/.gitlab-ci.yml' + - local: '/src/nbi/.gitlab-ci.yml' + - local: '/src/context/.gitlab-ci.yml' + - local: '/src/device/.gitlab-ci.yml' + - local: '/src/service/.gitlab-ci.yml' + - local: '/src/qkd_app/.gitlab-ci.yml' + - local: '/src/dbscanserving/.gitlab-ci.yml' + - local: '/src/opticalattackmitigator/.gitlab-ci.yml' + - local: '/src/opticalattackdetector/.gitlab-ci.yml' + - local: '/src/opticalattackmanager/.gitlab-ci.yml' + - local: '/src/opticalcontroller/.gitlab-ci.yml' + - local: '/src/ztp/.gitlab-ci.yml' + - local: '/src/policy/.gitlab-ci.yml' + - local: '/src/automation/.gitlab-ci.yml' + - local: '/src/forecaster/.gitlab-ci.yml' + #- local: '/src/webui/.gitlab-ci.yml' + #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' + #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' + - local: '/src/slice/.gitlab-ci.yml' + #- local: '/src/interdomain/.gitlab-ci.yml' + - local: '/src/pathcomp/.gitlab-ci.yml' + #- local: '/src/dlt/.gitlab-ci.yml' + - local: '/src/load_generator/.gitlab-ci.yml' + - local: '/src/bgpls_speaker/.gitlab-ci.yml' + - local: '/src/kpi_manager/.gitlab-ci.yml' + - local: '/src/kpi_value_api/.gitlab-ci.yml' + #- local: '/src/kpi_value_writer/.gitlab-ci.yml' + #- local: '/src/telemetry/.gitlab-ci.yml' + - local: '/src/analytics/.gitlab-ci.yml' + - local: '/src/qos_profile/.gitlab-ci.yml' + - local: '/src/vnt_manager/.gitlab-ci.yml' + - local: '/src/e2e_orchestrator/.gitlab-ci.yml' + - local: '/src/ztp_server/.gitlab-ci.yml' + - local: '/src/osm_client/.gitlab-ci.yml' + - local: '/src/simap_connector/.gitlab-ci.yml' # This should be last one: end-to-end integration tests - local: '/src/tests/.gitlab-ci.yml' diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 196f39566..9b256f1ae 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,20 +14,20 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: -# - local: '/src/tests/ofc22/.gitlab-ci.yml' -# #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' -# - local: '/src/tests/ecoc22/.gitlab-ci.yml' -# #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' -# #- local: 
'/src/tests/ofc23/.gitlab-ci.yml' + - local: '/src/tests/ofc22/.gitlab-ci.yml' + #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' + - local: '/src/tests/ecoc22/.gitlab-ci.yml' + #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' + #- local: '/src/tests/ofc23/.gitlab-ci.yml' - local: '/src/tests/ofc24/.gitlab-ci.yml' -# - local: '/src/tests/eucnc24/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' -# #- local: '/src/tests/ofc25/.gitlab-ci.yml' -# #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' + - local: '/src/tests/eucnc24/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' + #- local: '/src/tests/ofc25/.gitlab-ci.yml' + #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' -# - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' -# - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' + - local: '/src/tests/tools/mock_osm_nbi/.gitlab-ci.yml' + - local: '/src/tests/tools/simap_server/.gitlab-ci.yml' diff --git a/src/tests/qkd_end2end/.gitlab-ci.yml b/src/tests/qkd_end2end/.gitlab-ci.yml index 9c59ed220..3dedd48f1 100644 --- a/src/tests/qkd_end2end/.gitlab-ci.yml +++ b/src/tests/qkd_end2end/.gitlab-ci.yml @@ -171,7 +171,7 @@ end2end_test qkd_end2end: # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml - - yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml diff --git a/src/tests/qkd_end2end/scripts/run-01-onboarding.sh b/src/tests/qkd_end2end/scripts/run-01-onboarding.sh index 8138c7ed3..df9186204 100755 --- a/src/tests/qkd_end2end/scripts/run-01-onboarding.sh +++ b/src/tests/qkd_end2end/scripts/run-01-onboarding.sh @@ -15,6 +15,6 @@ source /var/teraflow/tfs_runtime_env_vars.sh export PYTHONPATH=/var/teraflow -pytest --verbose --log-level=DEBUG -o log_cli=true --verbose \ +pytest --verbose --log-level=INFO \ --junitxml=/opt/results/report_onboarding.xml \ /var/teraflow/tests/qkd_end2end/tests/test_01_onboarding.py -- GitLab From b1833cee99bdcf2a64a30e3926e7a5e8e3c7bab5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 14 Oct 2025 08:28:04 +0000 Subject: [PATCH 
366/367] OSM Client component: - Disable CI/CD unit test as it needs to be fixed --- src/osm_client/.gitlab-ci.yml | 154 +++++++++++++++++----------------- 1 file changed, 77 insertions(+), 77 deletions(-) diff --git a/src/osm_client/.gitlab-ci.yml b/src/osm_client/.gitlab-ci.yml index 2a0cfbb7e..6c5ffd318 100644 --- a/src/osm_client/.gitlab-ci.yml +++ b/src/osm_client/.gitlab-ci.yml @@ -43,80 +43,80 @@ build osm_client: - src/tests/.gitlab-ci.yml - .gitlab-ci.yml -# Apply unit test to the component -unit_test osm_client: - variables: - IMAGE_NAME: 'osm_client' # name of the microservice - MOCK_IMAGE_NAME: 'mock_osm_nbi' - IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) - stage: unit_test - needs: - - build osm_client - - build mock_osm_nbi - before_script: - # Do Docker cleanup - - docker ps --all --quiet | xargs --no-run-if-empty docker stop - - docker container prune --force - - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force - - docker image prune --force - - docker network prune --force - - docker volume prune --all --force - - docker buildx prune --force - - # Login Docker repository - - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - script: - - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" - - docker pull "$CI_REGISTRY_IMAGE/mock-osm-nbi:test" - - docker network create -d bridge teraflowbridge - - > - docker run --name mock_osm_nbi -d - --network=teraflowbridge - --env LOG_LEVEL=DEBUG - --env FLASK_ENV=development - $CI_REGISTRY_IMAGE/mock-osm-nbi:test - - > - docker run --name $IMAGE_NAME -d -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" - --network=teraflowbridge - --env LOG_LEVEL=DEBUG - --env FLASK_ENV=development - --env OSM_ADDRESS=mock_osm_nbi - $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG - - while ! 
docker logs $IMAGE_NAME 2>&1 | grep -q 'Running...'; do sleep 1; done - - docker ps -a - - docker logs $IMAGE_NAME - - docker logs mock_osm_nbi - - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report_unitary.xml" - - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" - coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' - after_script: - - docker logs $IMAGE_NAME - - docker logs mock_osm_nbi - - # Do Docker cleanup - - docker ps --all --quiet | xargs --no-run-if-empty docker stop - - docker container prune --force - - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force - - docker image prune --force - - docker network prune --force - - docker volume prune --all --force - - docker buildx prune --force - - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' - - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' - - changes: - - src/common/**/*.py - - proto/*.proto - - src/$IMAGE_NAME/**/*.{py,in,yml} - - src/$IMAGE_NAME/Dockerfile - - src/$IMAGE_NAME/tests/*.py - - manifests/${IMAGE_NAME}service.yaml - - src/tests/tools/mock_osm_nbi/**/*.{py,in,yml,yaml,yang,sh,json} - - src/tests/tools/mock_osm_nbi/Dockerfile - - src/tests/.gitlab-ci.yml - - .gitlab-ci.yml - artifacts: - when: always - reports: - junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml +## Apply unit test to the component +#unit_test osm_client: +# variables: +# IMAGE_NAME: 'osm_client' # name of the microservice +# MOCK_IMAGE_NAME: 'mock_osm_nbi' +# IMAGE_TAG: 'latest' # tag of the container image (production, development, etc) +# stage: unit_test +# needs: +# - build osm_client +# - build mock_osm_nbi +# before_script: +# # Do Docker cleanup +# - docker ps --all --quiet | xargs --no-run-if-empty docker stop +# - docker container prune --force +# - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force +# - docker image prune --force +# - docker network prune --force +# - docker volume prune --all --force +# - docker buildx prune --force +# +# # Login Docker repository +# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY +# script: +# - docker pull "$CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG" +# - docker pull "$CI_REGISTRY_IMAGE/mock-osm-nbi:test" +# - docker network create -d bridge teraflowbridge +# - > +# docker run --name mock_osm_nbi -d +# --network=teraflowbridge +# --env LOG_LEVEL=DEBUG +# --env FLASK_ENV=development +# $CI_REGISTRY_IMAGE/mock-osm-nbi:test +# - > +# docker run --name $IMAGE_NAME -d -v "$PWD/src/$IMAGE_NAME/tests:/opt/results" +# --network=teraflowbridge +# --env LOG_LEVEL=DEBUG +# --env FLASK_ENV=development +# --env OSM_ADDRESS=mock_osm_nbi +# $CI_REGISTRY_IMAGE/$IMAGE_NAME:$IMAGE_TAG +# - while ! 
docker logs $IMAGE_NAME 2>&1 | grep -q 'Running...'; do sleep 1; done +# - docker ps -a +# - docker logs $IMAGE_NAME +# - docker logs mock_osm_nbi +# - docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary.py --junitxml=/opt/results/${IMAGE_NAME}_report_unitary.xml" +# - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing" +# coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' +# after_script: +# - docker logs $IMAGE_NAME +# - docker logs mock_osm_nbi +# +# # Do Docker cleanup +# - docker ps --all --quiet | xargs --no-run-if-empty docker stop +# - docker container prune --force +# - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force +# - docker image prune --force +# - docker network prune --force +# - docker volume prune --all --force +# - docker buildx prune --force +# +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# - changes: +# - src/common/**/*.py +# - proto/*.proto +# - src/$IMAGE_NAME/**/*.{py,in,yml} +# - src/$IMAGE_NAME/Dockerfile +# - src/$IMAGE_NAME/tests/*.py +# - manifests/${IMAGE_NAME}service.yaml +# - src/tests/tools/mock_osm_nbi/**/*.{py,in,yml,yaml,yang,sh,json} +# - src/tests/tools/mock_osm_nbi/Dockerfile +# - src/tests/.gitlab-ci.yml +# - .gitlab-ci.yml +# artifacts: +# when: always +# reports: +# junit: src/$IMAGE_NAME/tests/${IMAGE_NAME}_report_*.xml -- GitLab From 3f639a675370cb83e41c02d43b7a4dc3948eb490 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Tue, 14 Oct 2025 14:04:52 +0000 Subject: [PATCH 367/367] CI/CD E2E tests: - Skip helm3 uninstall if release does not exist --- src/tests/ecoc22/.gitlab-ci.yml | 6 +++++- src/tests/eucnc24/.gitlab-ci.yml | 6 +++++- src/tests/ofc22/.gitlab-ci.yml | 6 +++++- src/tests/ofc24/.gitlab-ci.yml | 6 +++++- src/tests/qkd_end2end/.gitlab-ci.yml | 6 +++++- 5 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/tests/ecoc22/.gitlab-ci.yml b/src/tests/ecoc22/.gitlab-ci.yml index c57f7002b..4f93bec06 100644 --- a/src/tests/ecoc22/.gitlab-ci.yml +++ b/src/tests/ecoc22/.gitlab-ci.yml @@ -80,7 +80,11 @@ end2end_test ecoc22: - > for ns in ${OLD_NATS_NAMESPACES}; do if [[ "$ns" == nats* ]]; then - helm3 uninstall "$ns" -n "$ns" + if helm3 status "$ns" &>/dev/null; then + helm3 uninstall "$ns" -n "$ns" + else + echo "Release '$ns' not found, skipping..." + fi fi done - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)') diff --git a/src/tests/eucnc24/.gitlab-ci.yml b/src/tests/eucnc24/.gitlab-ci.yml index f90007ceb..ee99ea271 100644 --- a/src/tests/eucnc24/.gitlab-ci.yml +++ b/src/tests/eucnc24/.gitlab-ci.yml @@ -83,7 +83,11 @@ end2end_test eucnc24: - > for ns in ${OLD_NATS_NAMESPACES}; do if [[ "$ns" == nats* ]]; then - helm3 uninstall "$ns" -n "$ns" + if helm3 status "$ns" &>/dev/null; then + helm3 uninstall "$ns" -n "$ns" + else + echo "Release '$ns' not found, skipping..." 
+          fi
         fi
       done
     - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)')
diff --git a/src/tests/ofc22/.gitlab-ci.yml b/src/tests/ofc22/.gitlab-ci.yml
index 6b72f38dc..6c3b65f4a 100644
--- a/src/tests/ofc22/.gitlab-ci.yml
+++ b/src/tests/ofc22/.gitlab-ci.yml
@@ -80,7 +80,11 @@ end2end_test ofc22:
     - >
      for ns in ${OLD_NATS_NAMESPACES}; do
        if [[ "$ns" == nats* ]]; then
-          helm3 uninstall "$ns" -n "$ns"
+          if helm3 status "$ns" &>/dev/null; then
+            helm3 uninstall "$ns" -n "$ns"
+          else
+            echo "Release '$ns' not found, skipping..."
+          fi
        fi
      done
     - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)')
diff --git a/src/tests/ofc24/.gitlab-ci.yml b/src/tests/ofc24/.gitlab-ci.yml
index 832ae8305..e0453200f 100644
--- a/src/tests/ofc24/.gitlab-ci.yml
+++ b/src/tests/ofc24/.gitlab-ci.yml
@@ -80,7 +80,11 @@ end2end_test ofc24:
     - >
      for ns in ${OLD_NATS_NAMESPACES}; do
        if [[ "$ns" == nats* ]]; then
-          helm3 uninstall "$ns" -n "$ns"
+          if helm3 status "$ns" &>/dev/null; then
+            helm3 uninstall "$ns" -n "$ns"
+          else
+            echo "Release '$ns' not found, skipping..."
+          fi
        fi
      done
     - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)')
diff --git a/src/tests/qkd_end2end/.gitlab-ci.yml b/src/tests/qkd_end2end/.gitlab-ci.yml
index 3dedd48f1..ca9c0018f 100644
--- a/src/tests/qkd_end2end/.gitlab-ci.yml
+++ b/src/tests/qkd_end2end/.gitlab-ci.yml
@@ -80,7 +80,11 @@ end2end_test qkd_end2end:
     - >
      for ns in ${OLD_NATS_NAMESPACES}; do
        if [[ "$ns" == nats* ]]; then
-          helm3 uninstall "$ns" -n "$ns"
+          if helm3 status "$ns" &>/dev/null; then
+            helm3 uninstall "$ns" -n "$ns"
+          else
+            echo "Release '$ns' not found, skipping..."
+          fi
        fi
      done
     - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)')
--
GitLab
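
The guard added in the final patch is a general cleanup pattern: probe a Helm release with helm3 status before trying to uninstall it, so the E2E cleanup step does not fail when a release was never deployed. Below is a minimal standalone sketch of that pattern, outside any specific CI job; it assumes a helm3 binary on the PATH and a whitespace-separated list of candidate release names in OLD_NATS_NAMESPACES, as in the jobs above.

    #!/bin/bash
    # Uninstall each NATS release only if Helm actually knows about it;
    # otherwise report and continue instead of failing the cleanup step.
    for ns in ${OLD_NATS_NAMESPACES}; do
      if [[ "$ns" == nats* ]]; then
        if helm3 status "$ns" &>/dev/null; then
          helm3 uninstall "$ns" -n "$ns"
        else
          echo "Release '$ns' not found, skipping..."
        fi
      fi
    done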