From b596c12e4bbd957087733adf01001e0728168bbb Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Thu, 19 May 2022 13:33:48 +0200 Subject: [PATCH 01/91] Commit per delete rule --- .../drivers/openconfig/OpenConfigDriver.py | 25 +++++++++++++------ .../tests/Device_OpenConfig_Template.py | 1 + 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index 4965ced4e..8044ed29c 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -67,6 +67,7 @@ class NetconfSessionHandler: self.__look_for_keys = settings.get('look_for_keys', True) self.__allow_agent = settings.get('allow_agent', True) self.__force_running = settings.get('force_running', False) + self.__commit_per_delete = settings.get('delete_rule', False) self.__device_params = settings.get('device_params', {}) self.__manager_params = settings.get('manager_params', {}) self.__nc_params = settings.get('nc_params', {}) @@ -91,6 +92,9 @@ class NetconfSessionHandler: @property def use_candidate(self): return self.__candidate_supported and not self.__force_running + @property + def commit_per_delete_rule(self): return self.__commit_per_delete + @RETRY_DECORATOR def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin with self.__lock: @@ -182,8 +186,8 @@ def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples : LOGGER.exception('Error retrieving samples') def edit_config( - netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, target='running', - default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin + netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False, + target='running', default_operation='merge', 
test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin ): str_method = 'DeleteConfig' if delete else 'SetConfig' LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources))) @@ -203,6 +207,8 @@ def edit_config( netconf_handler.edit_config( config=str_config_message, target=target, default_operation=default_operation, test_option=test_option, error_option=error_option, format=format) + if commit_per_rule: + netconf_handler.commit() results[i] = True except Exception as e: # pylint: disable=broad-except str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting') @@ -295,12 +301,15 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + if self.__netconf_handler.commit_per_delete_rule: + results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True) + else: + results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) + try: + self.__netconf_handler.commit() + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources))) + results = [e for _ in resources] # if commit fails, set exception in each resource else: results = edit_config(self.__netconf_handler, resources, delete=True) return results diff --git a/src/device/tests/Device_OpenConfig_Template.py b/src/device/tests/Device_OpenConfig_Template.py index d95e86dfa..2e38661f5 100644 --- 
a/src/device/tests/Device_OpenConfig_Template.py +++ b/src/device/tests/Device_OpenConfig_Template.py @@ -32,6 +32,7 @@ DEVICE_OC_CONNECT_RULES = json_device_connect_rules(DEVICE_OC_ADDRESS, DEVICE_OC 'hostkey_verify' : True, 'look_for_keys' : True, 'allow_agent' : True, + 'delete_rule' : False, 'device_params' : {'name': 'default'}, 'manager_params' : {'timeout' : DEVICE_OC_TIMEOUT}, }) -- GitLab From a606f3ea69b67926fef6fbfd852b0896fd9d4135 Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Thu, 19 May 2022 13:39:15 +0200 Subject: [PATCH 02/91] Commit per delete rule --- src/device/service/drivers/openconfig/OpenConfigDriver.py | 1 - src/device/tests/Device_OpenConfig_Template.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index a8932fbcf..8461e83e4 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -30,7 +30,6 @@ from device.service.driver_api.AnyTreeTools import TreeNode, get_subnode, set_su from .templates import ALL_RESOURCE_KEYS, EMPTY_CONFIG, compose_config, get_filter, parse from .RetryDecorator import retry - DEBUG_MODE = False logging.getLogger('ncclient.manager').setLevel(logging.DEBUG if DEBUG_MODE else logging.WARNING) logging.getLogger('ncclient.transport.ssh').setLevel(logging.DEBUG if DEBUG_MODE else logging.WARNING) diff --git a/src/device/tests/Device_OpenConfig_Template.py b/src/device/tests/Device_OpenConfig_Template.py index 293874095..af339cce4 100644 --- a/src/device/tests/Device_OpenConfig_Template.py +++ b/src/device/tests/Device_OpenConfig_Template.py @@ -37,5 +37,6 @@ DEVICE_OC_CONNECT_RULES = json_device_connect_rules(DEVICE_OC_ADDRESS, DEVICE_OC 'manager_params' : {'timeout' : DEVICE_OC_TIMEOUT}, }) + DEVICE_OC_CONFIG_RULES = [] # populate your configuration rules to test DEVICE_OC_DECONFIG_RULES = [] # 
populate your deconfiguration rules to test -- GitLab From 09881403f5ba810f196569fa06aae2f360346744 Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Thu, 19 May 2022 13:42:15 +0200 Subject: [PATCH 03/91] Changes in templates --- .../templates/acl/acl-set/acl-entry/edit_config.xml | 2 ++ .../templates/acl/interfaces/egress/edit_config.xml | 8 ++++++-- .../templates/acl/interfaces/ingress/edit_config.xml | 8 ++++++-- 3 files changed, 14 insertions(+), 4 deletions(-) diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml index fac259b6f..297563cca 100644 --- a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml @@ -13,6 +13,7 @@ {{sequence_id}} + {% if config is defined %} {% if source_address is defined %}{{source_address}}{% endif%} @@ -35,6 +36,7 @@ {% if log_action is defined %}{{log_action}}{% endif%} + {% endif%} diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml index d987b0cc4..b070b305a 100644 --- a/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/egress/edit_config.xml @@ -1,18 +1,21 @@ - + {{id}} {{id}} + {% if interface is defined %} {{interface}} {% if subinterface is defined %}{{subinterface}}{% endif%} + {% endif%} + {% if set_name_egress is defined %} - + > {{set_name_egress}} {{type_egress}} @@ -21,6 +24,7 @@ + {% endif%} diff --git a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml index 144a03c55..d1f18efb2 100644 --- 
a/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/interfaces/ingress/edit_config.xml @@ -1,18 +1,21 @@ - + {{id}} {{id}} + {% if interface is defined %} {{interface}} {% if subinterface is defined %}{{subinterface}}{% endif%} + {% endif%} + {% if set_name_ingress is defined %} - + {{set_name_ingress}} {{type_ingress}} @@ -21,6 +24,7 @@ + {% endif%} -- GitLab From d1712b4a788a1b02a353e7a2e6bd91a0db1618d7 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Thu, 23 Jun 2022 10:59:43 +0200 Subject: [PATCH 04/91] Initial implementations of ECOC'22 Demo - not functiona, just skeleton to generate the demo paper. - logic for disjoint paths to be implemented --- ecoc22 | 1 + src/common/DeviceTypes.py | 2 + src/common/tools/object_factory/Device.py | 9 + .../nbi_plugins/ietf_l2vpn/Constants.py | 65 ++-- .../nbi_plugins/ietf_l2vpn/L2VPN_Service.py | 3 + .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 93 ++++-- .../tests/mock_osm/WimconnectorIETFL2VPN.py | 41 ++- src/tests/ecoc22/.gitignore | 2 + src/tests/ecoc22/README.md | 93 ++++++ src/tests/ecoc22/__init__.py | 14 + src/tests/ecoc22/deploy_in_kubernetes.sh | 27 ++ src/tests/ecoc22/dump_logs.sh | 24 ++ src/tests/ecoc22/expose_services.yaml | 112 +++++++ src/tests/ecoc22/run_test_01_bootstrap.sh | 51 +++ .../ecoc22/run_test_02_create_service.sh | 41 +++ .../ecoc22/run_test_03_delete_service.sh | 41 +++ src/tests/ecoc22/run_test_04_cleanup.sh | 41 +++ src/tests/ecoc22/show_deploy.sh | 18 ++ src/tests/ecoc22/show_logs_compute.sh | 17 + src/tests/ecoc22/show_logs_context.sh | 17 + src/tests/ecoc22/show_logs_device.sh | 17 + src/tests/ecoc22/show_logs_service.sh | 17 + src/tests/ecoc22/show_logs_slice.sh | 17 + src/tests/ecoc22/show_logs_webui.sh | 17 + src/tests/ecoc22/tests/.gitignore | 1 + src/tests/ecoc22/tests/Credentials.py | 0 src/tests/ecoc22/tests/Objects.py | 303 ++++++++++++++++++ src/tests/ecoc22/tests/Tools.py | 36 +++ 
src/tests/ecoc22/tests/__init__.py | 14 + .../ecoc22/tests/test_functional_bootstrap.py | 146 +++++++++ .../ecoc22/tests/test_functional_cleanup.py | 123 +++++++ .../tests/test_functional_create_service.py | 89 +++++ .../tests/test_functional_delete_service.py | 134 ++++++++ src/tests/oeccpsc22/tests/Tools.py | 2 +- src/tests/ofc22/README.md | 2 +- src/webui/service/static/topology.js | 45 ++- .../topology_icons/Acknowledgements.txt | 3 + .../static/topology_icons/datacenter.png | Bin 0 -> 9417 bytes .../static/topology_icons/emu-datacenter.png | Bin 0 -> 7690 bytes 39 files changed, 1615 insertions(+), 63 deletions(-) create mode 120000 ecoc22 create mode 100644 src/tests/ecoc22/.gitignore create mode 100644 src/tests/ecoc22/README.md create mode 100644 src/tests/ecoc22/__init__.py create mode 100755 src/tests/ecoc22/deploy_in_kubernetes.sh create mode 100755 src/tests/ecoc22/dump_logs.sh create mode 100644 src/tests/ecoc22/expose_services.yaml create mode 100755 src/tests/ecoc22/run_test_01_bootstrap.sh create mode 100755 src/tests/ecoc22/run_test_02_create_service.sh create mode 100755 src/tests/ecoc22/run_test_03_delete_service.sh create mode 100755 src/tests/ecoc22/run_test_04_cleanup.sh create mode 100755 src/tests/ecoc22/show_deploy.sh create mode 100755 src/tests/ecoc22/show_logs_compute.sh create mode 100755 src/tests/ecoc22/show_logs_context.sh create mode 100755 src/tests/ecoc22/show_logs_device.sh create mode 100755 src/tests/ecoc22/show_logs_service.sh create mode 100755 src/tests/ecoc22/show_logs_slice.sh create mode 100755 src/tests/ecoc22/show_logs_webui.sh create mode 100644 src/tests/ecoc22/tests/.gitignore create mode 100644 src/tests/ecoc22/tests/Credentials.py create mode 100644 src/tests/ecoc22/tests/Objects.py create mode 100644 src/tests/ecoc22/tests/Tools.py create mode 100644 src/tests/ecoc22/tests/__init__.py create mode 100644 src/tests/ecoc22/tests/test_functional_bootstrap.py create mode 100644 
src/tests/ecoc22/tests/test_functional_cleanup.py create mode 100644 src/tests/ecoc22/tests/test_functional_create_service.py create mode 100644 src/tests/ecoc22/tests/test_functional_delete_service.py create mode 100644 src/webui/service/static/topology_icons/datacenter.png create mode 100644 src/webui/service/static/topology_icons/emu-datacenter.png diff --git a/ecoc22 b/ecoc22 new file mode 120000 index 000000000..3c61895e5 --- /dev/null +++ b/ecoc22 @@ -0,0 +1 @@ +src/tests/ecoc22/ \ No newline at end of file diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 44f8e3981..432f8d19c 100644 --- a/src/common/DeviceTypes.py +++ b/src/common/DeviceTypes.py @@ -15,8 +15,10 @@ from enum import Enum class DeviceTypeEnum(Enum): + EMULATED_DATACENTER = 'emu-datacenter' EMULATED_OPTICAL_LINE_SYSTEM = 'emu-optical-line-system' EMULATED_PACKET_ROUTER = 'emu-packet-router' + DATACENTER = 'datacenter' OPTICAL_ROADM = 'optical-roadm' OPTICAL_TRANDPONDER = 'optical-trandponder' OPTICAL_LINE_SYSTEM = 'optical-line-system' diff --git a/src/common/tools/object_factory/Device.py b/src/common/tools/object_factory/Device.py index ae065e9c0..3a016f51b 100644 --- a/src/common/tools/object_factory/Device.py +++ b/src/common/tools/object_factory/Device.py @@ -20,6 +20,7 @@ from context.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusE DEVICE_DISABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED +DEVICE_EMUDC_TYPE = DeviceTypeEnum.EMULATED_DATACENTER.value DEVICE_EMUPR_TYPE = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value DEVICE_EMUOLS_TYPE = DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM.value DEVICE_EMU_DRIVERS = [DeviceDriverEnum.DEVICEDRIVER_UNDEFINED] @@ -67,6 +68,14 @@ def json_device_emulated_tapi_disabled( device_uuid, DEVICE_EMUOLS_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers) +def json_device_emulated_datacenter_disabled( + device_uuid : str, endpoints : List[Dict] = [], config_rules : 
List[Dict] = [], + drivers : List[Dict] = DEVICE_EMU_DRIVERS + ): + return json_device( + device_uuid, DEVICE_EMUDC_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, + drivers=drivers) + def json_device_packetrouter_disabled( device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_PR_DRIVERS diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py index b7f377254..7c0ef0183 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py @@ -21,34 +21,43 @@ DEFAULT_BGP_ROUTE_TARGET = '{:d}:{:d}'.format(DEFAULT_BGP_AS, 333) # device_uuid:endpoint_uuid => ( # device_uuid, endpoint_uuid, router_id, route_distinguisher, sub_if_index, address_ip, address_prefix) BEARER_MAPPINGS = { - 'R1-INF:13/2/1': ('R1-INF', '13/2/1', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24), - 'R2-EMU:13/2/1': ('R2-EMU', '13/2/1', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24), - 'R3-INF:13/2/1': ('R3-INF', '13/2/1', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24), - 'R4-EMU:13/2/1': ('R4-EMU', '13/2/1', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24), + # OFC'22 + #'R1-INF:13/2/1': ('R1-INF', '13/2/1', '10.10.10.1', '65000:100', 400, '3.3.2.1', 24), + #'R2-EMU:13/2/1': ('R2-EMU', '13/2/1', '12.12.12.1', '65000:120', 450, '3.4.2.1', 24), + #'R3-INF:13/2/1': ('R3-INF', '13/2/1', '20.20.20.1', '65000:200', 500, '3.3.1.1', 24), + #'R4-EMU:13/2/1': ('R4-EMU', '13/2/1', '22.22.22.1', '65000:220', 550, '3.4.1.1', 24), - 'R1@D1:3/1': ('R1@D1', '3/1', '10.0.1.1', '65001:101', 100, '1.1.3.1', 24), - 'R1@D1:3/2': ('R1@D1', '3/2', '10.0.1.1', '65001:101', 100, '1.1.3.2', 24), - 'R1@D1:3/3': ('R1@D1', '3/3', '10.0.1.1', '65001:101', 100, '1.1.3.3', 24), - 'R2@D1:3/1': ('R2@D1', '3/1', '10.0.1.2', '65001:102', 100, '1.2.3.1', 24), - 'R2@D1:3/2': 
('R2@D1', '3/2', '10.0.1.2', '65001:102', 100, '1.2.3.2', 24), - 'R2@D1:3/3': ('R2@D1', '3/3', '10.0.1.2', '65001:102', 100, '1.2.3.3', 24), - 'R3@D1:3/1': ('R3@D1', '3/1', '10.0.1.3', '65001:103', 100, '1.3.3.1', 24), - 'R3@D1:3/2': ('R3@D1', '3/2', '10.0.1.3', '65001:103', 100, '1.3.3.2', 24), - 'R3@D1:3/3': ('R3@D1', '3/3', '10.0.1.3', '65001:103', 100, '1.3.3.3', 24), - 'R4@D1:3/1': ('R4@D1', '3/1', '10.0.1.4', '65001:104', 100, '1.4.3.1', 24), - 'R4@D1:3/2': ('R4@D1', '3/2', '10.0.1.4', '65001:104', 100, '1.4.3.2', 24), - 'R4@D1:3/3': ('R4@D1', '3/3', '10.0.1.4', '65001:104', 100, '1.4.3.3', 24), + # OECC/PSC'22 - domain 1 + #'R1@D1:3/1': ('R1@D1', '3/1', '10.0.1.1', '65001:101', 100, '1.1.3.1', 24), + #'R1@D1:3/2': ('R1@D1', '3/2', '10.0.1.1', '65001:101', 100, '1.1.3.2', 24), + #'R1@D1:3/3': ('R1@D1', '3/3', '10.0.1.1', '65001:101', 100, '1.1.3.3', 24), + #'R2@D1:3/1': ('R2@D1', '3/1', '10.0.1.2', '65001:102', 100, '1.2.3.1', 24), + #'R2@D1:3/2': ('R2@D1', '3/2', '10.0.1.2', '65001:102', 100, '1.2.3.2', 24), + #'R2@D1:3/3': ('R2@D1', '3/3', '10.0.1.2', '65001:102', 100, '1.2.3.3', 24), + #'R3@D1:3/1': ('R3@D1', '3/1', '10.0.1.3', '65001:103', 100, '1.3.3.1', 24), + #'R3@D1:3/2': ('R3@D1', '3/2', '10.0.1.3', '65001:103', 100, '1.3.3.2', 24), + #'R3@D1:3/3': ('R3@D1', '3/3', '10.0.1.3', '65001:103', 100, '1.3.3.3', 24), + #'R4@D1:3/1': ('R4@D1', '3/1', '10.0.1.4', '65001:104', 100, '1.4.3.1', 24), + #'R4@D1:3/2': ('R4@D1', '3/2', '10.0.1.4', '65001:104', 100, '1.4.3.2', 24), + #'R4@D1:3/3': ('R4@D1', '3/3', '10.0.1.4', '65001:104', 100, '1.4.3.3', 24), - 'R1@D2:3/1': ('R1@D2', '3/1', '10.0.2.1', '65002:101', 100, '2.1.3.1', 24), - 'R1@D2:3/2': ('R1@D2', '3/2', '10.0.2.1', '65002:101', 100, '2.1.3.2', 24), - 'R1@D2:3/3': ('R1@D2', '3/3', '10.0.2.1', '65002:101', 100, '2.1.3.3', 24), - 'R2@D2:3/1': ('R2@D2', '3/1', '10.0.2.2', '65002:102', 100, '2.2.3.1', 24), - 'R2@D2:3/2': ('R2@D2', '3/2', '10.0.2.2', '65002:102', 100, '2.2.3.2', 24), - 'R2@D2:3/3': ('R2@D2', 
'3/3', '10.0.2.2', '65002:102', 100, '2.2.3.3', 24), - 'R3@D2:3/1': ('R3@D2', '3/1', '10.0.2.3', '65002:103', 100, '2.3.3.1', 24), - 'R3@D2:3/2': ('R3@D2', '3/2', '10.0.2.3', '65002:103', 100, '2.3.3.2', 24), - 'R3@D2:3/3': ('R3@D2', '3/3', '10.0.2.3', '65002:103', 100, '2.3.3.3', 24), - 'R4@D2:3/1': ('R4@D2', '3/1', '10.0.2.4', '65002:104', 100, '2.4.3.1', 24), - 'R4@D2:3/2': ('R4@D2', '3/2', '10.0.2.4', '65002:104', 100, '2.4.3.2', 24), - 'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24), + # OECC/PSC'22 - domain 2 + #'R1@D2:3/1': ('R1@D2', '3/1', '10.0.2.1', '65002:101', 100, '2.1.3.1', 24), + #'R1@D2:3/2': ('R1@D2', '3/2', '10.0.2.1', '65002:101', 100, '2.1.3.2', 24), + #'R1@D2:3/3': ('R1@D2', '3/3', '10.0.2.1', '65002:101', 100, '2.1.3.3', 24), + #'R2@D2:3/1': ('R2@D2', '3/1', '10.0.2.2', '65002:102', 100, '2.2.3.1', 24), + #'R2@D2:3/2': ('R2@D2', '3/2', '10.0.2.2', '65002:102', 100, '2.2.3.2', 24), + #'R2@D2:3/3': ('R2@D2', '3/3', '10.0.2.2', '65002:102', 100, '2.2.3.3', 24), + #'R3@D2:3/1': ('R3@D2', '3/1', '10.0.2.3', '65002:103', 100, '2.3.3.1', 24), + #'R3@D2:3/2': ('R3@D2', '3/2', '10.0.2.3', '65002:103', 100, '2.3.3.2', 24), + #'R3@D2:3/3': ('R3@D2', '3/3', '10.0.2.3', '65002:103', 100, '2.3.3.3', 24), + #'R4@D2:3/1': ('R4@D2', '3/1', '10.0.2.4', '65002:104', 100, '2.4.3.1', 24), + #'R4@D2:3/2': ('R4@D2', '3/2', '10.0.2.4', '65002:104', 100, '2.4.3.2', 24), + #'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24), + + # ECOC'22 + 'CE1-PE1': ('PE1', '1/1', '10.0.0.101', '65000:101', 300, None, None), + 'CE2-PE2': ('PE2', '1/1', '10.0.0.102', '65000:102', 300, None, None), + 'CE3-PE3': ('PE3', '1/1', '10.0.0.103', '65000:103', 300, None, None), + 'CE4-PE4': ('PE4', '1/1', '10.0.0.104', '65000:104', 300, None, None), } diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index 27489410f..ecaf9281f 100644 
--- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -41,7 +41,10 @@ class L2VPN_Service(Resource): LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id))) LOGGER.debug('Request: {:s}'.format(str(request))) + # TODO: HACK ECOC'22, to be corrected response = jsonify({}) + response.status_code = HTTP_OK + return response try: target = get_service(self.context_client, vpn_id) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 2c0245b9a..3f6b1760f 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import time, random from ctypes import Union import json, logging from typing import Dict @@ -39,6 +40,12 @@ def process_site_network_access(context_client : ContextClient, site_network_acc vpn_id = site_network_access['vpn-attachment']['vpn-id'] cvlan_id = site_network_access['connection']['tagged-interface']['dot1q-vlan-tagged']['cvlan-id'] bearer_reference = site_network_access['bearer']['bearer-reference'] + access_priority = site_network_access.get('availability', {}).get('access-priority') + single_active = site_network_access.get('availability', {}).get('single-active') + all_active = site_network_access.get('availability', {}).get('all-active') + diversity_constraints = site_network_access.get('access-diversity', {}).get('constraints', {}).get('constraint', []) + # TODO: manage targets of constraints, right now, only type of constraint is considered + diversity_constraints = [constraint['constraint-type'] for constraint in diversity_constraints] mapping = 
BEARER_MAPPINGS.get(bearer_reference) if mapping is None: @@ -139,19 +146,29 @@ def process_site_network_access(context_client : ContextClient, site_network_acc raise Exception(msg.format( str(json_settings['vlan_id']), str(cvlan_id))) - if 'address_ip' not in json_settings: # missing, add it - json_settings['address_ip'] = address_ip - elif json_settings['address_ip'] != address_ip: # differs, raise exception - msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})' - raise Exception(msg.format( - str(json_settings['address_ip']), str(address_ip))) + if address_ip is not None: + if 'address_ip' not in json_settings: # missing, add it + json_settings['address_ip'] = address_ip + elif json_settings['address_ip'] != address_ip: # differs, raise exception + msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})' + raise Exception(msg.format( + str(json_settings['address_ip']), str(address_ip))) - if 'address_prefix' not in json_settings: # missing, add it - json_settings['address_prefix'] = address_prefix - elif json_settings['address_prefix'] != address_prefix: # differs, raise exception - msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})' - raise Exception(msg.format( - str(json_settings['address_prefix']), str(address_prefix))) + if address_prefix is not None: + if 'address_prefix' not in json_settings: # missing, add it + json_settings['address_prefix'] = address_prefix + elif json_settings['address_prefix'] != address_prefix: # differs, raise exception + msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})' + raise Exception(msg.format( + str(json_settings['address_prefix']), str(address_prefix))) + + if address_prefix is not None: + if 'address_prefix' not in json_settings: # missing, add it + json_settings['address_prefix'] = address_prefix + elif json_settings['address_prefix'] != address_prefix: # differs, raise exception + msg = 'Specified AddressPrefix({:s}) differs from 
Service AddressPrefix({:s})' + raise Exception(msg.format( + str(json_settings['address_prefix']), str(address_prefix))) config_rule.resource_value = json.dumps(json_settings, sort_keys=True) break @@ -160,14 +177,31 @@ def process_site_network_access(context_client : ContextClient, site_network_acc config_rule = target.service_config.config_rules.add() # pylint: disable=no-member config_rule.action = ConfigActionEnum.CONFIGACTION_SET config_rule.resource_key = endpoint_settings_key - config_rule.resource_value = json.dumps({ + resource_value = { 'router_id': router_id, 'route_distinguisher': route_distinguisher, 'sub_interface_index': sub_if_index, 'vlan_id': cvlan_id, 'address_ip': address_ip, 'address_prefix': address_prefix, - }, sort_keys=True) + } + if access_priority is not None: resource_value['access_priority'] = access_priority + if single_active is not None and len(single_active) > 0: resource_value['access_active'] = 'single' + if all_active is not None and len(all_active) > 0: resource_value['access_active'] = 'all' + config_rule.resource_value = json.dumps(resource_value, sort_keys=True) + + for constraint in target.service_constraints: # pylint: disable=no-member + if constraint.constraint_type == 'diversity' and len(diversity_constraints) > 0: + constraint_value = set(json.loads(constraint.constraint_value)) + constraint_value.update(diversity_constraints) + constraint.constraint_value = json.dumps(sorted(list(constraint_value)), sort_keys=True) + break + else: + # not found, and there are diversity constraints, add them + if len(diversity_constraints) > 0: + constraint = target.service_constraints.add() # pylint: disable=no-member + constraint.constraint_type = 'diversity' + constraint.constraint_value = json.dumps(sorted(list(diversity_constraints)), sort_keys=True) return target @@ -183,21 +217,22 @@ def process_list_site_network_access( for site_network_access in request_data['ietf-l2vpn-svc:site-network-access']: sna_request = 
process_site_network_access(context_client, site_network_access) LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request))) - try: - if isinstance(sna_request, Service): - sna_reply = service_client.UpdateService(sna_request) - if sna_reply != sna_request.service_id: # pylint: disable=no-member - raise Exception('Service update failed. Wrong Service Id was returned') - elif isinstance(sna_request, Slice): - sna_reply = slice_client.UpdateSlice(sna_request) - if sna_reply != sna_request.slice_id: # pylint: disable=no-member - raise Exception('Slice update failed. Wrong Slice Id was returned') - else: - raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request)))) - except Exception as e: # pylint: disable=broad-except - msg = 'Something went wrong Updating Service {:s}' - LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request))) - errors.append({'error': str(e)}) + #try: + # if isinstance(sna_request, Service): + # sna_reply = service_client.UpdateService(sna_request) + # if sna_reply != sna_request.service_id: # pylint: disable=no-member + # raise Exception('Service update failed. Wrong Service Id was returned') + # elif isinstance(sna_request, Slice): + # sna_reply = slice_client.UpdateSlice(sna_request) + # if sna_reply != sna_request.slice_id: # pylint: disable=no-member + # raise Exception('Slice update failed. 
Wrong Slice Id was returned') + # else: + # raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request)))) + #except Exception as e: # pylint: disable=broad-except + # msg = 'Something went wrong Updating Service {:s}' + # LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request))) + # errors.append({'error': str(e)}) + time.sleep(random.random() / 10.0) response = jsonify(errors) response.status_code = HTTP_NOCONTENT if len(errors) == 0 else HTTP_SERVERERROR diff --git a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py b/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py index b9639e804..ec9918ff0 100644 --- a/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py +++ b/src/compute/tests/mock_osm/WimconnectorIETFL2VPN.py @@ -33,6 +33,7 @@ the Layer 2 service. import requests import uuid import logging +import copy #from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError from .sdnconn import SdnConnectorBase, SdnConnectorError @@ -222,8 +223,29 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): http_code=response_service_creation.status_code, ) - """Second step, create the connections and vpn attachments""" + self.logger.info('connection_points = {:s}'.format(str(connection_points))) + + # Check if protected paths are requested + extended_connection_points = [] for connection_point in connection_points: + extended_connection_points.append(connection_point) + + connection_point_wan_info = self.search_mapp(connection_point) + service_mapping_info = connection_point_wan_info.get('service_mapping_info', {}) + redundant_service_endpoint_ids = service_mapping_info.get('redundant') + + if redundant_service_endpoint_ids is None: continue + if len(redundant_service_endpoint_ids) == 0: continue + + for redundant_service_endpoint_id in redundant_service_endpoint_ids: + redundant_connection_point = copy.deepcopy(connection_point) + redundant_connection_point['service_endpoint_id'] = redundant_service_endpoint_id + 
extended_connection_points.append(redundant_connection_point) + + self.logger.info('extended_connection_points = {:s}'.format(str(extended_connection_points))) + + """Second step, create the connections and vpn attachments""" + for connection_point in extended_connection_points: connection_point_wan_info = self.search_mapp(connection_point) site_network_access = {} connection = {} @@ -264,6 +286,23 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): site_network_access["bearer"] = connection_point_wan_info[ "service_mapping_info" ]["bearer"] + + access_priority = connection_point_wan_info["service_mapping_info"].get("priority") + if access_priority is not None: + availability = {} + availability["access-priority"] = access_priority + availability["single-active"] = [None] + site_network_access["availability"] = availability + + constraint = {} + constraint['constraint-type'] = 'end-to-end-diverse' + constraint['target'] = {'all-other-accesses': [None]} + + access_diversity = {} + access_diversity['constraints'] = {'constraint': []} + access_diversity['constraints']['constraint'].append(constraint) + site_network_access["access-diversity"] = access_diversity + site_network_accesses = {} site_network_access_list = [] site_network_access_list.append(site_network_access) diff --git a/src/tests/ecoc22/.gitignore b/src/tests/ecoc22/.gitignore new file mode 100644 index 000000000..0a3f4400d --- /dev/null +++ b/src/tests/ecoc22/.gitignore @@ -0,0 +1,2 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. +descriptors_real.json diff --git a/src/tests/ecoc22/README.md b/src/tests/ecoc22/README.md new file mode 100644 index 000000000..0e23de037 --- /dev/null +++ b/src/tests/ecoc22/README.md @@ -0,0 +1,93 @@ +# ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service +This functional test reproduces the experimental assessment of "" presented at [ECOC'22](https://www.ecoc2022.org/). 
+ +## Functional test folder +This functional test can be found in folder `./src/tests/ecoc22/`. A convenience alias `./ecoc22/` pointing to that folder has been defined. + +## Execute with real devices +This functional test has only been tested with emulated devices; however, if you have access to real devices, you can modify the files `./ecoc22/tests/Objects.py` and `./ofc22/tests/Credentials.py` to point to your devices, and map to your network topology. + +__Important:__ The OpenConfigDriver, the P4Driver, and the TrandportApiDriver have to be considered as experimental. The configuration and monitoring capabilities they support are limited or partially implemented. Use them with care. + +## Deployment +To run this functional test, it is assumed you have deployed a Kubernetes-based environment as described in [Wiki: Installing Kubernetes on your Linux machine](https://gitlab.com/teraflow-h2020/controller/-/wikis/Installing-Kubernetes-on-your-Linux-machine). + +After installing Kubernetes, you can run it to deploy the appropriate components. Feel free to adapt it your particular case following the instructions described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance). + +__Important:__ +- The `./ecoc22/deploy_in_kubernetes.sh` assumes you have installed the appropriate development dependencies using the `install_development_dependencies.sh` script. +- Before running the scripts in this folder, remember to update the environment variable K8S_HOSTNAME to point to the Kubernetes node you will be using as described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance). + +For your convenience, the configuration script `./ecoc22/deploy_in_kubernetes.sh` has been already defined. 
The script will take some minutes to download the dependencies, build the micro-services, deploy them, and leave them ready for operation. The deployment will finish with a report of the items that have been created.
+
+## Access to the WebUI and Dashboard
+When the deployment completes, you can connect to the TeraFlow OS WebUI and Dashboards as described in [Wiki: Using the WebUI](https://gitlab.com/teraflow-h2020/controller/-/wikis/Using-the-WebUI), or by directly navigating to `http://[your-node-ip]:30800` for the WebUI and `http://[your-node-ip]:30300` for the Grafana Dashboard.
+
+Notes:
+- the default credentials for the Grafana Dashboard are user/pass: `admin`/`admin123+`.
+- this functional test does not involve the Monitoring component, so no monitoring data is plotted in Grafana.
+
+## Test execution
+To execute this functional test, four main steps need to be carried out:
+1. Device bootstrapping
+2. L3VPN Service creation
+3. L3VPN Service removal
+4. Cleanup
+
+As the execution of each test progresses, a report will be generated indicating PASSED / FAILED / SKIPPED. If there is some error during the execution, you should see a detailed report on the error. See the troubleshooting section in that case.
+
+Feel free to check the logs of the different components using the appropriate `ecoc22/show_logs_[component].sh` scripts after you execute each step.
+
+### 1. Device bootstrapping
+
+This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The expected results are:
+- The devices to be incorporated into the Topology.
+- The devices to be pre-configured and initialized as ENABLED by the Automation component.
+- The monitoring for the device ports (named as endpoints in TeraFlow OS) to be activated and data collection to automatically start.
+- The links to be added to the topology. 
+
+To run this step, execute the following script:
+`./ecoc22/run_test_01_bootstrap.sh`
+
+When the script finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data being plotted and updated every 5 seconds (by default). Given that there is no service configured, you should see a 0-valued flat plot.
+
+In the WebUI, select the "admin" Context. In the "Devices" tab you should see that 5 different emulated devices have been created and activated: 4 packet routers, and 1 optical line system controller. Besides, in the "Services" tab you should see that there is no service created. Note here that the emulated devices produce synthetic randomly-generated data and do not care about the services configured.
+
+### 2. L3VPN Service creation
+
+This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance.
+
+To run this step, execute the following script:
+`./ecoc22/run_test_02_create_service.sh`
+
+When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, you should see the plots with the monitored data for the device. By default, device R1-INF is selected.
+
+### 3. L3VPN Service removal
+
+This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock OSM instance.
+
+To run this step, execute the following script:
+`./ecoc22/run_test_03_delete_service.sh`
+
+When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed. Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. 
In the Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again.
+
+### 4. Cleanup
+
+This last step just performs a cleanup of the scenario removing all the TeraFlow OS entities for completeness.
+
+To run this step, execute the following script:
+`./ecoc22/run_test_04_cleanup.sh`
+
+When the script finishes, check the WebUI "Devices" tab, you should see that the devices have been removed. Besides, in the "Services" tab you can see that the "admin" Context has no services, given that the context has been removed.
+
+## Troubleshooting
+
+Different scripts are provided to help in troubleshooting issues in the execution of the test. These scripts are:
+- `./ecoc22/show_deploy.sh`: this script reports the items belonging to this deployment. Use it to validate that all the pods, deployments and replica sets are ready and have a state of "running"; and the services are deployed and have appropriate IP addresses and ports.
+- `ecoc22/show_logs_compute.sh`: this script reports the logs for the compute component.
+- `ecoc22/show_logs_context.sh`: this script reports the logs for the context component.
+- `ecoc22/show_logs_device.sh`: this script reports the logs for the device component.
+- `ecoc22/show_logs_service.sh`: this script reports the logs for the service component.
+- `ecoc22/show_logs_slice.sh`: this script reports the logs for the slice component.
+- `ecoc22/show_logs_webui.sh`: this script reports the logs for the webui component.
diff --git a/src/tests/ecoc22/__init__.py b/src/tests/ecoc22/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/tests/ecoc22/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ecoc22/deploy_in_kubernetes.sh b/src/tests/ecoc22/deploy_in_kubernetes.sh new file mode 100755 index 000000000..8cd32ff19 --- /dev/null +++ b/src/tests/ecoc22/deploy_in_kubernetes.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +# ECOC 22 deployment settings + +export REGISTRY_IMAGE="" +export COMPONENTS="context device service slice compute webui" +export IMAGE_TAG="ecoc22" +export K8S_NAMESPACE="ecoc22" +export K8S_HOSTNAME="kubernetes-master" +export EXTRA_MANIFESTS="./ecoc22/expose_services.yaml" +export GRAFANA_PASSWORD="admin123+" + +./deploy_in_kubernetes.sh diff --git a/src/tests/ecoc22/dump_logs.sh b/src/tests/ecoc22/dump_logs.sh new file mode 100755 index 000000000..85372f85a --- /dev/null +++ b/src/tests/ecoc22/dump_logs.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +export COMPONENTS="context device service slice compute webui" +export K8S_NAMESPACE="ecoc22" + +mkdir -p tmp/exec_logs/$K8S_NAMESPACE/ +rm tmp/exec_logs/$K8S_NAMESPACE/* + +for COMPONENT in $COMPONENTS; do + kubectl --namespace $K8S_NAMESPACE logs deployment/${COMPONENT}service -c server > tmp/exec_logs/$K8S_NAMESPACE/$COMPONENT.log +done diff --git a/src/tests/ecoc22/expose_services.yaml b/src/tests/ecoc22/expose_services.yaml new file mode 100644 index 000000000..d51438361 --- /dev/null +++ b/src/tests/ecoc22/expose_services.yaml @@ -0,0 +1,112 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- +apiVersion: v1 +kind: Service +metadata: + name: contextservice-public + labels: + app: contextservice +spec: + type: NodePort + selector: + app: contextservice + ports: + - name: grpc + protocol: TCP + port: 1010 + targetPort: 1010 + nodePort: 30101 + - name: redis + protocol: TCP + port: 6379 + targetPort: 6379 + nodePort: 30637 + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 31808 +--- +apiVersion: v1 +kind: Service +metadata: + name: deviceservice-public + labels: + app: deviceservice +spec: + type: NodePort + selector: + app: deviceservice + ports: + - name: grpc + protocol: TCP + port: 2020 + targetPort: 2020 + nodePort: 30202 +--- +apiVersion: v1 +kind: Service +metadata: + name: monitoringservice-public + labels: + app: monitoringservice +spec: + type: NodePort + selector: + app: monitoringservice + ports: + - name: influx + protocol: TCP + port: 8086 + targetPort: 8086 + nodePort: 30886 +--- +apiVersion: v1 +kind: Service +metadata: + name: computeservice-public +spec: + type: NodePort + selector: + app: computeservice + ports: + - name: http + protocol: TCP + port: 8080 + targetPort: 8080 + nodePort: 30808 +--- +apiVersion: v1 +kind: Service +metadata: + name: webuiservice-public + labels: + app: webuiservice +spec: + type: NodePort + selector: + app: webuiservice + ports: + - name: http + protocol: TCP + port: 8004 + targetPort: 8004 + nodePort: 30800 + - name: grafana + protocol: TCP + port: 3000 + targetPort: 3000 + nodePort: 30300 diff --git a/src/tests/ecoc22/run_test_01_bootstrap.sh 
b/src/tests/ecoc22/run_test_01_bootstrap.sh new file mode 100755 index 000000000..f4d3b9ba3 --- /dev/null +++ b/src/tests/ecoc22/run_test_01_bootstrap.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage + +# Configure the correct folder on the .coveragerc file +cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE + +# Destroy old coverage file +rm -f $COVERAGEFILE + +# Set the name of the Kubernetes namespace and hostname to use. 
+K8S_NAMESPACE="ecoc22" +# K8S_HOSTNAME="kubernetes-master" +# dynamically gets the name of the K8s master node +K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` + +# Flush Context database +kubectl --namespace $K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL + +export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') +export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') +export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') + +# Useful flags for pytest: +#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG + +# Run functional test and analyze coverage of code at same time + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + tests/ecoc22/tests/test_functional_bootstrap.py diff --git a/src/tests/ecoc22/run_test_02_create_service.sh b/src/tests/ecoc22/run_test_02_create_service.sh new file mode 100755 index 000000000..f426e8cd8 --- /dev/null +++ b/src/tests/ecoc22/run_test_02_create_service.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage + +# Set the name of the Kubernetes namespace and hostname to use. +K8S_NAMESPACE="ecoc22" +# dynamically gets the name of the K8s master node +K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` + +export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') +export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') +export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') + +# Useful flags for pytest: +#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG + +# Run functional test and analyze coverage of code at same time + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose 
-o log_cli=true \ + tests/ecoc22/tests/test_functional_create_service.py diff --git a/src/tests/ecoc22/run_test_03_delete_service.sh b/src/tests/ecoc22/run_test_03_delete_service.sh new file mode 100755 index 000000000..a589ddf68 --- /dev/null +++ b/src/tests/ecoc22/run_test_03_delete_service.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage + +# Set the name of the Kubernetes namespace and hostname to use. 
+K8S_NAMESPACE="ecoc22" +# dynamically gets the name of the K8s master node +K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` + +export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') +export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') +export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') + +# Useful flags for pytest: +#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG + +# Run functional test and analyze coverage of code at same time + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + tests/ecoc22/tests/test_functional_delete_service.py diff --git a/src/tests/ecoc22/run_test_04_cleanup.sh b/src/tests/ecoc22/run_test_04_cleanup.sh new file mode 100755 index 000000000..0b8b30519 --- /dev/null +++ b/src/tests/ecoc22/run_test_04_cleanup.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage + +# Set the name of the Kubernetes namespace and hostname to use. +K8S_NAMESPACE="ecoc22" +# dynamically gets the name of the K8s master node +K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` + +export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') +export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') +export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') +export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') + +# Useful flags for pytest: +#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG + +# Run functional test and analyze coverage of code at same time + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + 
tests/ecoc22/tests/test_functional_cleanup.py diff --git a/src/tests/ecoc22/show_deploy.sh b/src/tests/ecoc22/show_deploy.sh new file mode 100755 index 000000000..3e1b283a9 --- /dev/null +++ b/src/tests/ecoc22/show_deploy.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE get all diff --git a/src/tests/ecoc22/show_logs_compute.sh b/src/tests/ecoc22/show_logs_compute.sh new file mode 100755 index 000000000..7d27f477d --- /dev/null +++ b/src/tests/ecoc22/show_logs_compute.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE logs deployment/computeservice diff --git a/src/tests/ecoc22/show_logs_context.sh b/src/tests/ecoc22/show_logs_context.sh new file mode 100755 index 000000000..814c486bd --- /dev/null +++ b/src/tests/ecoc22/show_logs_context.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE logs deployment/contextservice -c server diff --git a/src/tests/ecoc22/show_logs_device.sh b/src/tests/ecoc22/show_logs_device.sh new file mode 100755 index 000000000..5e291e7ca --- /dev/null +++ b/src/tests/ecoc22/show_logs_device.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE logs deployment/deviceservice diff --git a/src/tests/ecoc22/show_logs_service.sh b/src/tests/ecoc22/show_logs_service.sh new file mode 100755 index 000000000..0189b8c2e --- /dev/null +++ b/src/tests/ecoc22/show_logs_service.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE logs deployment/serviceservice diff --git a/src/tests/ecoc22/show_logs_slice.sh b/src/tests/ecoc22/show_logs_slice.sh new file mode 100755 index 000000000..b92aab8b7 --- /dev/null +++ b/src/tests/ecoc22/show_logs_slice.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE logs deployment/sliceservice diff --git a/src/tests/ecoc22/show_logs_webui.sh b/src/tests/ecoc22/show_logs_webui.sh new file mode 100755 index 000000000..5c6bada20 --- /dev/null +++ b/src/tests/ecoc22/show_logs_webui.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +K8S_NAMESPACE="ecoc22" +kubectl --namespace $K8S_NAMESPACE logs deployment/webuiservice -c server diff --git a/src/tests/ecoc22/tests/.gitignore b/src/tests/ecoc22/tests/.gitignore new file mode 100644 index 000000000..6b97d6fe3 --- /dev/null +++ b/src/tests/ecoc22/tests/.gitignore @@ -0,0 +1 @@ +# Add here your files containing confidential testbed details such as IP addresses, ports, usernames, passwords, etc. diff --git a/src/tests/ecoc22/tests/Credentials.py b/src/tests/ecoc22/tests/Credentials.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/tests/ecoc22/tests/Objects.py b/src/tests/ecoc22/tests/Objects.py new file mode 100644 index 000000000..062a00516 --- /dev/null +++ b/src/tests/ecoc22/tests/Objects.py @@ -0,0 +1,303 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import ( + json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, + json_device_emulated_packet_router_disabled, json_device_id) +from common.tools.object_factory.Topology import json_topology, json_topology_id +from .Tools import compose_bearer, compose_service_endpoint_id, json_endpoint_ids, link + + +# ----- Context -------------------------------------------------------------------------------------------------------- +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) +CONTEXT = json_context(DEFAULT_CONTEXT_UUID) + + +# ----- Topology ------------------------------------------------------------------------------------------------------- +TOPOLOGY_ID = json_topology_id(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) +TOPOLOGY = json_topology(DEFAULT_TOPOLOGY_UUID, context_id=CONTEXT_ID) + + +# ----- Customer Equipment (CE) Devices -------------------------------------------------------------------------------- +DEVICE_CE1_UUID = 'CE1' +DEVICE_CE1_ENDPOINT_DEFS = [('1/1', 'copper', [])] +DEVICE_CE1_ID = json_device_id(DEVICE_CE1_UUID) +DEVICE_CE1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_CE1_ID, DEVICE_CE1_ENDPOINT_DEFS) +DEVICE_CE1 = json_device_emulated_packet_router_disabled(DEVICE_CE1_UUID) +ENDPOINT_ID_CE1_1_1 = DEVICE_CE1_ENDPOINT_IDS[0] +DEVICE_CE1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE1_ENDPOINT_DEFS) + 
+DEVICE_CE2_UUID = 'CE2' +DEVICE_CE2_ENDPOINT_DEFS = [('1/1', 'copper', [])] +DEVICE_CE2_ID = json_device_id(DEVICE_CE2_UUID) +DEVICE_CE2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_CE2_ID, DEVICE_CE2_ENDPOINT_DEFS) +DEVICE_CE2 = json_device_emulated_packet_router_disabled(DEVICE_CE2_UUID) +ENDPOINT_ID_CE2_1_1 = DEVICE_CE2_ENDPOINT_IDS[0] +DEVICE_CE2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE2_ENDPOINT_DEFS) + +DEVICE_CE3_UUID = 'CE3' +DEVICE_CE3_ENDPOINT_DEFS = [('1/1', 'copper', [])] +DEVICE_CE3_ID = json_device_id(DEVICE_CE3_UUID) +DEVICE_CE3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_CE3_ID, DEVICE_CE3_ENDPOINT_DEFS) +DEVICE_CE3 = json_device_emulated_packet_router_disabled(DEVICE_CE3_UUID) +ENDPOINT_ID_CE3_1_1 = DEVICE_CE3_ENDPOINT_IDS[0] +DEVICE_CE3_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE3_ENDPOINT_DEFS) + +DEVICE_CE4_UUID = 'CE4' +DEVICE_CE4_ENDPOINT_DEFS = [('1/1', 'copper', [])] +DEVICE_CE4_ID = json_device_id(DEVICE_CE4_UUID) +DEVICE_CE4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_CE4_ID, DEVICE_CE4_ENDPOINT_DEFS) +DEVICE_CE4 = json_device_emulated_packet_router_disabled(DEVICE_CE4_UUID) +ENDPOINT_ID_CE4_1_1 = DEVICE_CE4_ENDPOINT_IDS[0] +DEVICE_CE4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_CE4_ENDPOINT_DEFS) + +# ----- Provider Equipment (PE) Devices -------------------------------------------------------------------------------- +DEVICE_PE1_UUID = 'PE1' +DEVICE_PE1_ENDPOINT_DEFS = [('1/1', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', [])] +DEVICE_PE1_ID = json_device_id(DEVICE_PE1_UUID) +DEVICE_PE1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_PE1_ID, DEVICE_PE1_ENDPOINT_DEFS) +DEVICE_PE1 = json_device_emulated_packet_router_disabled(DEVICE_PE1_UUID) +ENDPOINT_ID_PE1_1_1 = DEVICE_PE1_ENDPOINT_IDS[0] +ENDPOINT_ID_PE1_2_1 = DEVICE_PE1_ENDPOINT_IDS[1] +ENDPOINT_ID_PE1_2_2 = DEVICE_PE1_ENDPOINT_IDS[2] +DEVICE_PE1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE1_ENDPOINT_DEFS) + +DEVICE_PE2_UUID 
= 'PE2' +DEVICE_PE2_ENDPOINT_DEFS = [('1/1', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', [])] +DEVICE_PE2_ID = json_device_id(DEVICE_PE2_UUID) +DEVICE_PE2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_PE2_ID, DEVICE_PE2_ENDPOINT_DEFS) +DEVICE_PE2 = json_device_emulated_packet_router_disabled(DEVICE_PE2_UUID) +ENDPOINT_ID_PE2_1_1 = DEVICE_PE2_ENDPOINT_IDS[0] +ENDPOINT_ID_PE2_2_1 = DEVICE_PE2_ENDPOINT_IDS[1] +ENDPOINT_ID_PE2_2_2 = DEVICE_PE2_ENDPOINT_IDS[2] +DEVICE_PE2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE2_ENDPOINT_DEFS) + +DEVICE_PE3_UUID = 'PE3' +DEVICE_PE3_ENDPOINT_DEFS = [('1/1', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', [])] +DEVICE_PE3_ID = json_device_id(DEVICE_PE3_UUID) +DEVICE_PE3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_PE3_ID, DEVICE_PE3_ENDPOINT_DEFS) +DEVICE_PE3 = json_device_emulated_packet_router_disabled(DEVICE_PE3_UUID) +ENDPOINT_ID_PE3_1_1 = DEVICE_PE3_ENDPOINT_IDS[0] +ENDPOINT_ID_PE3_2_1 = DEVICE_PE3_ENDPOINT_IDS[1] +ENDPOINT_ID_PE3_2_2 = DEVICE_PE3_ENDPOINT_IDS[2] +DEVICE_PE3_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE3_ENDPOINT_DEFS) + +DEVICE_PE4_UUID = 'PE4' +DEVICE_PE4_ENDPOINT_DEFS = [('1/1', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', [])] +DEVICE_PE4_ID = json_device_id(DEVICE_PE4_UUID) +DEVICE_PE4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_PE4_ID, DEVICE_PE4_ENDPOINT_DEFS) +DEVICE_PE4 = json_device_emulated_packet_router_disabled(DEVICE_PE4_UUID) +ENDPOINT_ID_PE4_1_1 = DEVICE_PE4_ENDPOINT_IDS[0] +ENDPOINT_ID_PE4_2_1 = DEVICE_PE4_ENDPOINT_IDS[1] +ENDPOINT_ID_PE4_2_2 = DEVICE_PE4_ENDPOINT_IDS[2] +DEVICE_PE4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_PE4_ENDPOINT_DEFS) + +# ----- BackBone (BB) Devices ------------------------------------------------------------------------------------------ +DEVICE_BB1_UUID = 'BB1' +DEVICE_BB1_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', 
[])] +DEVICE_BB1_ID = json_device_id(DEVICE_BB1_UUID) +DEVICE_BB1_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB1_ID, DEVICE_BB1_ENDPOINT_DEFS) +DEVICE_BB1 = json_device_emulated_packet_router_disabled(DEVICE_BB1_UUID) +ENDPOINT_ID_BB1_1_1 = DEVICE_BB1_ENDPOINT_IDS[0] +ENDPOINT_ID_BB1_1_2 = DEVICE_BB1_ENDPOINT_IDS[1] +ENDPOINT_ID_BB1_2_1 = DEVICE_BB1_ENDPOINT_IDS[2] +ENDPOINT_ID_BB1_2_2 = DEVICE_BB1_ENDPOINT_IDS[3] +ENDPOINT_ID_BB1_2_3 = DEVICE_BB1_ENDPOINT_IDS[4] +DEVICE_BB1_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB1_ENDPOINT_DEFS) + +DEVICE_BB2_UUID = 'BB2' +DEVICE_BB2_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])] +DEVICE_BB2_ID = json_device_id(DEVICE_BB2_UUID) +DEVICE_BB2_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB2_ID, DEVICE_BB2_ENDPOINT_DEFS) +DEVICE_BB2 = json_device_emulated_packet_router_disabled(DEVICE_BB2_UUID) +ENDPOINT_ID_BB2_1_1 = DEVICE_BB2_ENDPOINT_IDS[0] +ENDPOINT_ID_BB2_1_2 = DEVICE_BB2_ENDPOINT_IDS[1] +ENDPOINT_ID_BB2_2_1 = DEVICE_BB2_ENDPOINT_IDS[2] +ENDPOINT_ID_BB2_2_2 = DEVICE_BB2_ENDPOINT_IDS[3] +ENDPOINT_ID_BB2_2_3 = DEVICE_BB2_ENDPOINT_IDS[4] +DEVICE_BB2_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB2_ENDPOINT_DEFS) + +DEVICE_BB3_UUID = 'BB3' +DEVICE_BB3_ENDPOINT_DEFS = [('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])] +DEVICE_BB3_ID = json_device_id(DEVICE_BB3_UUID) +DEVICE_BB3_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB3_ID, DEVICE_BB3_ENDPOINT_DEFS) +DEVICE_BB3 = json_device_emulated_packet_router_disabled(DEVICE_BB3_UUID) +ENDPOINT_ID_BB3_2_1 = DEVICE_BB3_ENDPOINT_IDS[0] +ENDPOINT_ID_BB3_2_2 = DEVICE_BB3_ENDPOINT_IDS[1] +ENDPOINT_ID_BB3_2_3 = DEVICE_BB3_ENDPOINT_IDS[2] +DEVICE_BB3_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB3_ENDPOINT_DEFS) + +DEVICE_BB4_UUID = 'BB4' +DEVICE_BB4_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', []), 
('2/3', 'copper', [])] +DEVICE_BB4_ID = json_device_id(DEVICE_BB4_UUID) +DEVICE_BB4_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB4_ID, DEVICE_BB4_ENDPOINT_DEFS) +DEVICE_BB4 = json_device_emulated_packet_router_disabled(DEVICE_BB4_UUID) +ENDPOINT_ID_BB4_1_1 = DEVICE_BB4_ENDPOINT_IDS[0] +ENDPOINT_ID_BB4_1_2 = DEVICE_BB4_ENDPOINT_IDS[1] +ENDPOINT_ID_BB4_2_1 = DEVICE_BB4_ENDPOINT_IDS[2] +ENDPOINT_ID_BB4_2_2 = DEVICE_BB4_ENDPOINT_IDS[3] +ENDPOINT_ID_BB4_2_3 = DEVICE_BB4_ENDPOINT_IDS[4] +DEVICE_BB4_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB4_ENDPOINT_DEFS) + +DEVICE_BB5_UUID = 'BB5' +DEVICE_BB5_ENDPOINT_DEFS = [('1/1', 'copper', []), ('1/2', 'copper', []), + ('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])] +DEVICE_BB5_ID = json_device_id(DEVICE_BB5_UUID) +DEVICE_BB5_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB5_ID, DEVICE_BB5_ENDPOINT_DEFS) +DEVICE_BB5 = json_device_emulated_packet_router_disabled(DEVICE_BB5_UUID) +ENDPOINT_ID_BB5_1_1 = DEVICE_BB5_ENDPOINT_IDS[0] +ENDPOINT_ID_BB5_1_2 = DEVICE_BB5_ENDPOINT_IDS[1] +ENDPOINT_ID_BB5_2_1 = DEVICE_BB5_ENDPOINT_IDS[2] +ENDPOINT_ID_BB5_2_2 = DEVICE_BB5_ENDPOINT_IDS[3] +ENDPOINT_ID_BB5_2_3 = DEVICE_BB5_ENDPOINT_IDS[4] +DEVICE_BB5_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB5_ENDPOINT_DEFS) + +DEVICE_BB6_UUID = 'BB6' +DEVICE_BB6_ENDPOINT_DEFS = [('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', [])] +DEVICE_BB6_ID = json_device_id(DEVICE_BB6_UUID) +DEVICE_BB6_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB6_ID, DEVICE_BB6_ENDPOINT_DEFS) +DEVICE_BB6 = json_device_emulated_packet_router_disabled(DEVICE_BB6_UUID) +ENDPOINT_ID_BB6_2_1 = DEVICE_BB6_ENDPOINT_IDS[0] +ENDPOINT_ID_BB6_2_2 = DEVICE_BB6_ENDPOINT_IDS[1] +ENDPOINT_ID_BB6_2_3 = DEVICE_BB6_ENDPOINT_IDS[2] +DEVICE_BB6_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB6_ENDPOINT_DEFS) + +DEVICE_BB7_UUID = 'BB7' +DEVICE_BB7_ENDPOINT_DEFS = [('2/1', 'copper', []), ('2/2', 'copper', []), ('2/3', 'copper', []), 
('2/4', 'copper', []), + ('2/5', 'copper', []), ('2/6', 'copper', [])] +DEVICE_BB7_ID = json_device_id(DEVICE_BB7_UUID) +DEVICE_BB7_ENDPOINT_IDS = json_endpoint_ids(DEVICE_BB7_ID, DEVICE_BB7_ENDPOINT_DEFS) +DEVICE_BB7 = json_device_emulated_packet_router_disabled(DEVICE_BB7_UUID) +ENDPOINT_ID_BB7_2_1 = DEVICE_BB7_ENDPOINT_IDS[0] +ENDPOINT_ID_BB7_2_2 = DEVICE_BB7_ENDPOINT_IDS[1] +ENDPOINT_ID_BB7_2_3 = DEVICE_BB7_ENDPOINT_IDS[2] +ENDPOINT_ID_BB7_2_4 = DEVICE_BB7_ENDPOINT_IDS[3] +ENDPOINT_ID_BB7_2_5 = DEVICE_BB7_ENDPOINT_IDS[4] +ENDPOINT_ID_BB7_2_6 = DEVICE_BB7_ENDPOINT_IDS[5] +DEVICE_BB7_CONNECT_RULES = json_device_emulated_connect_rules(DEVICE_BB7_ENDPOINT_DEFS) + + +# ----- Links ---------------------------------------------------------------------------------------------------------- +LINK_CE1_PE1_UUID, LINK_CE1_PE1_ID, LINK_CE1_PE1 = link(ENDPOINT_ID_CE1_1_1, ENDPOINT_ID_PE1_1_1) +LINK_CE2_PE2_UUID, LINK_CE2_PE2_ID, LINK_CE2_PE2 = link(ENDPOINT_ID_CE2_1_1, ENDPOINT_ID_PE2_1_1) +LINK_CE3_PE3_UUID, LINK_CE3_PE3_ID, LINK_CE3_PE3 = link(ENDPOINT_ID_CE3_1_1, ENDPOINT_ID_PE3_1_1) +LINK_CE4_PE4_UUID, LINK_CE4_PE4_ID, LINK_CE4_PE4 = link(ENDPOINT_ID_CE4_1_1, ENDPOINT_ID_PE4_1_1) + +LINK_PE1_BB1_UUID, LINK_PE1_BB1_ID, LINK_PE1_BB1 = link(ENDPOINT_ID_PE1_2_1, ENDPOINT_ID_BB1_1_1) +LINK_PE1_BB2_UUID, LINK_PE1_BB2_ID, LINK_PE1_BB2 = link(ENDPOINT_ID_PE1_2_2, ENDPOINT_ID_BB2_1_1) +LINK_PE2_BB1_UUID, LINK_PE2_BB1_ID, LINK_PE2_BB1 = link(ENDPOINT_ID_PE2_2_1, ENDPOINT_ID_BB1_1_2) +LINK_PE2_BB2_UUID, LINK_PE2_BB2_ID, LINK_PE2_BB2 = link(ENDPOINT_ID_PE2_2_2, ENDPOINT_ID_BB2_1_2) + +LINK_PE3_BB4_UUID, LINK_PE3_BB4_ID, LINK_PE3_BB4 = link(ENDPOINT_ID_PE3_2_1, ENDPOINT_ID_BB4_1_1) +LINK_PE3_BB5_UUID, LINK_PE3_BB5_ID, LINK_PE3_BB5 = link(ENDPOINT_ID_PE3_2_2, ENDPOINT_ID_BB5_1_1) +LINK_PE4_BB4_UUID, LINK_PE4_BB4_ID, LINK_PE4_BB4 = link(ENDPOINT_ID_PE4_2_1, ENDPOINT_ID_BB4_1_2) +LINK_PE4_BB5_UUID, LINK_PE4_BB5_ID, LINK_PE4_BB5 = link(ENDPOINT_ID_PE4_2_2, ENDPOINT_ID_BB5_1_2) + 
+LINK_BB1_BB2_UUID, LINK_BB1_BB2_ID, LINK_BB1_BB2 = link(ENDPOINT_ID_BB1_2_1, ENDPOINT_ID_BB2_2_2) +LINK_BB2_BB3_UUID, LINK_BB2_BB3_ID, LINK_BB2_BB3 = link(ENDPOINT_ID_BB2_2_1, ENDPOINT_ID_BB3_2_2) +LINK_BB3_BB4_UUID, LINK_BB3_BB4_ID, LINK_BB3_BB4 = link(ENDPOINT_ID_BB3_2_1, ENDPOINT_ID_BB4_2_2) +LINK_BB4_BB5_UUID, LINK_BB4_BB5_ID, LINK_BB4_BB5 = link(ENDPOINT_ID_BB4_2_1, ENDPOINT_ID_BB5_2_2) +LINK_BB5_BB6_UUID, LINK_BB5_BB6_ID, LINK_BB5_BB6 = link(ENDPOINT_ID_BB5_2_1, ENDPOINT_ID_BB6_2_2) +LINK_BB6_BB1_UUID, LINK_BB6_BB1_ID, LINK_BB6_BB1 = link(ENDPOINT_ID_BB6_2_1, ENDPOINT_ID_BB1_2_2) + +LINK_BB1_BB7_UUID, LINK_BB1_BB7_ID, LINK_BB1_BB7 = link(ENDPOINT_ID_BB1_2_3, ENDPOINT_ID_BB7_2_1) +LINK_BB2_BB7_UUID, LINK_BB2_BB7_ID, LINK_BB2_BB7 = link(ENDPOINT_ID_BB2_2_3, ENDPOINT_ID_BB7_2_2) +LINK_BB3_BB7_UUID, LINK_BB3_BB7_ID, LINK_BB3_BB7 = link(ENDPOINT_ID_BB3_2_3, ENDPOINT_ID_BB7_2_3) +LINK_BB4_BB7_UUID, LINK_BB4_BB7_ID, LINK_BB4_BB7 = link(ENDPOINT_ID_BB4_2_3, ENDPOINT_ID_BB7_2_4) +LINK_BB5_BB7_UUID, LINK_BB5_BB7_ID, LINK_BB5_BB7 = link(ENDPOINT_ID_BB5_2_3, ENDPOINT_ID_BB7_2_5) +LINK_BB6_BB7_UUID, LINK_BB6_BB7_ID, LINK_BB6_BB7 = link(ENDPOINT_ID_BB6_2_3, ENDPOINT_ID_BB7_2_6) + + +# ----- WIM Service Settings ------------------------------------------------------------------------------------------- +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' + +def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]): + ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] + ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] + pe_device_uuid = pe_device_id['device_uuid']['uuid'] + service_endpoint_id = '{:s}-{:s}-{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid) + bearer = '{:s}-{:s}'.format(ce_device_uuid, pe_device_uuid) + _mapping = { + 'service_endpoint_id': service_endpoint_id, + 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, + 'service_mapping_info': { + 'site-id': site_id, + 
'bearer': {'bearer-reference': bearer}, + } + } + if priority is not None: _mapping['service_mapping_info']['priority'] = priority + if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant + return service_endpoint_id, _mapping + +WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', ENDPOINT_ID_CE1_1_1, DEVICE_PE1_ID, priority=10, redundant=['DC1-CE2-1/1']) +WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', ENDPOINT_ID_CE2_1_1, DEVICE_PE2_ID, priority=20) +WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', ENDPOINT_ID_CE3_1_1, DEVICE_PE3_ID, priority=10, redundant=['DC2-CE4-1/1']) +WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', ENDPOINT_ID_CE4_1_1, DEVICE_PE4_ID, priority=20) + +WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC] + +WIM_SRV_VLAN_ID = 300 +WIM_SERVICE_TYPE = 'ELINE' +WIM_SERVICE_CONNECTION_POINTS = [ + {'service_endpoint_id': WIM_SEP_DC1_PRI, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}}, + {'service_endpoint_id': WIM_SEP_DC2_PRI, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}}, +] + + +# ----- Object Collections --------------------------------------------------------------------------------------------- + +CONTEXTS = [CONTEXT] +TOPOLOGIES = [TOPOLOGY] + +DEVICES = [ + (DEVICE_CE1, DEVICE_CE1_CONNECT_RULES), + (DEVICE_CE2, DEVICE_CE2_CONNECT_RULES), + (DEVICE_CE3, DEVICE_CE3_CONNECT_RULES), + (DEVICE_CE4, DEVICE_CE4_CONNECT_RULES), + + (DEVICE_PE1, DEVICE_PE1_CONNECT_RULES), + (DEVICE_PE2, DEVICE_PE2_CONNECT_RULES), + (DEVICE_PE3, DEVICE_PE3_CONNECT_RULES), + (DEVICE_PE4, DEVICE_PE4_CONNECT_RULES), + + (DEVICE_BB1, DEVICE_BB1_CONNECT_RULES), + (DEVICE_BB2, DEVICE_BB2_CONNECT_RULES), + (DEVICE_BB6, DEVICE_BB6_CONNECT_RULES), + (DEVICE_BB7, DEVICE_BB7_CONNECT_RULES), + (DEVICE_BB3, DEVICE_BB3_CONNECT_RULES), + (DEVICE_BB5, DEVICE_BB5_CONNECT_RULES), + 
(DEVICE_BB4, DEVICE_BB4_CONNECT_RULES), +] + +LINKS = [ + LINK_CE1_PE1, LINK_CE2_PE2, LINK_CE3_PE3, LINK_CE4_PE4, + LINK_PE1_BB1, LINK_PE1_BB2, LINK_PE2_BB1, LINK_PE2_BB2, + LINK_PE3_BB5, LINK_PE3_BB4, LINK_PE4_BB5, LINK_PE4_BB4, + LINK_BB1_BB2, LINK_BB2_BB3, LINK_BB3_BB4, LINK_BB4_BB5, LINK_BB5_BB6, LINK_BB6_BB1, + LINK_BB1_BB7, LINK_BB2_BB7, LINK_BB3_BB7, LINK_BB4_BB7, LINK_BB5_BB7, LINK_BB6_BB7, +] diff --git a/src/tests/ecoc22/tests/Tools.py b/src/tests/ecoc22/tests/Tools.py new file mode 100644 index 000000000..33205da9b --- /dev/null +++ b/src/tests/ecoc22/tests/Tools.py @@ -0,0 +1,36 @@ +from typing import Dict, List, Tuple +from common.tools.object_factory.EndPoint import json_endpoint, json_endpoint_id +from common.tools.object_factory.Link import json_link, json_link_id + +def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): + return [ + json_endpoint_id(device_id, ep_uuid, topology_id=None) + for ep_uuid, _, _ in endpoint_descriptors + ] + +def json_endpoints(device_id : Dict, endpoint_descriptors : List[Tuple[str, str, List[int]]]): + return [ + json_endpoint(device_id, ep_uuid, ep_type, topology_id=None, kpi_sample_types=ep_sample_types) + for ep_uuid, ep_type, ep_sample_types in endpoint_descriptors + ] + +def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: + return '{:s}/{:s}=={:s}/{:s}'.format( + a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], + a_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) + +def link(a_endpoint_id, z_endpoint_id) -> Tuple[str, Dict, Dict]: + link_uuid = get_link_uuid(a_endpoint_id, z_endpoint_id) + link_id = json_link_id(link_uuid) + link_data = json_link(link_uuid, [a_endpoint_id, z_endpoint_id]) + return link_uuid, link_id, link_data + +def compose_service_endpoint_id(endpoint_id): + device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] + endpoint_uuid = 
endpoint_id['endpoint_uuid']['uuid'] + return ':'.join([device_uuid, endpoint_uuid]) + +def compose_bearer(endpoint_id): + device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] + endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] + return ':'.join([device_uuid, endpoint_uuid]) diff --git a/src/tests/ecoc22/tests/__init__.py b/src/tests/ecoc22/tests/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/tests/ecoc22/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py new file mode 100644 index 000000000..7626ce304 --- /dev/null +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -0,0 +1,146 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, logging, pytest +from common.Settings import get_setting +from context.client.ContextClient import ContextClient +from context.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology +from device.client.DeviceClient import DeviceClient +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +def test_scenario_empty(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure database is empty ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 + + +def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name + + # ----- Create Contexts and Topologies ----------------------------------------------------------------------------- + for context in CONTEXTS: + context_uuid = context['context_id']['context_uuid']['uuid'] + LOGGER.info('Adding Context {:s}'.format(context_uuid)) + response = context_client.SetContext(Context(**context)) + assert response.context_uuid.uuid == context_uuid + + for topology in TOPOLOGIES: + context_uuid = 
topology['topology_id']['context_id']['context_uuid']['uuid'] + topology_uuid = topology['topology_id']['topology_uuid']['uuid'] + LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) + response = context_client.SetTopology(Topology(**topology)) + assert response.context_id.context_uuid.uuid == context_uuid + assert response.topology_uuid.uuid == topology_uuid + + +def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + + +def test_devices_bootstraping( + context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + + # ----- Create Devices and Validate Collected Events --------------------------------------------------------------- + for device, connect_rules in DEVICES: + device_uuid = device['device_id']['device_uuid']['uuid'] + LOGGER.info('Adding Device {:s}'.format(device_uuid)) + + device_with_connect_rules = copy.deepcopy(device) + device_with_connect_rules['device_config']['config_rules'].extend(connect_rules) + response = device_client.AddDevice(Device(**device_with_connect_rules)) + assert response.device_uuid.uuid == device_uuid + + +def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure bevices are created 
----------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + + +def test_links_creation(context_client : ContextClient): # pylint: disable=redefined-outer-name + + # ----- Create Links and Validate Collected Events ----------------------------------------------------------------- + for link in LINKS: + link_uuid = link['link_id']['link_uuid']['uuid'] + LOGGER.info('Adding Link {:s}'.format(link_uuid)) + response = context_client.SetLink(Link(**link)) + assert response.link_uuid.uuid == link_uuid + + +def test_links_created(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure links are created ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py new file mode 100644 index 000000000..eb78a5850 --- /dev/null +++ b/src/tests/ecoc22/tests/test_functional_cleanup.py @@ -0,0 +1,123 
@@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from common.Settings import get_setting +from common.tests.EventTools import EVENT_REMOVE, check_events +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Link import json_link_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from context.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId +from device.client.DeviceClient import DeviceClient +from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + yield _client + _client.close() + + +def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is removed 
------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + + +def test_scenario_cleanup( + context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + + # ----- Start the EventsCollector ---------------------------------------------------------------------------------- + events_collector = EventsCollector(context_client) + events_collector.start() + + expected_events = [] + + # ----- Delete Links and Validate Collected Events ----------------------------------------------------------------- + for link in LINKS: + link_id = link['link_id'] + link_uuid = link_id['link_uuid']['uuid'] + LOGGER.info('Deleting Link {:s}'.format(link_uuid)) + context_client.RemoveLink(LinkId(**link_id)) + expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid))) + + # ----- Delete Devices and Validate Collected Events --------------------------------------------------------------- + for device, _ in DEVICES: + device_id = device['device_id'] + device_uuid = device_id['device_uuid']['uuid'] + LOGGER.info('Deleting Device {:s}'.format(device_uuid)) + device_client.DeleteDevice(DeviceId(**device_id)) + expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) + + # ----- Delete Topologies and Validate Collected Events ------------------------------------------------------------ + for topology in TOPOLOGIES: + topology_id = topology['topology_id'] + context_uuid = 
topology_id['context_id']['context_uuid']['uuid'] + topology_uuid = topology_id['topology_uuid']['uuid'] + LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) + context_client.RemoveTopology(TopologyId(**topology_id)) + context_id = json_context_id(context_uuid) + expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) + + # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- + for context in CONTEXTS: + context_id = context['context_id'] + context_uuid = context_id['context_uuid']['uuid'] + LOGGER.info('Deleting Context {:s}'.format(context_uuid)) + context_client.RemoveContext(ContextId(**context_id)) + expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) + + # ----- Validate Collected Events ---------------------------------------------------------------------------------- + check_events(events_collector, expected_events) + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + +def test_scenario_empty_again(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure database is empty again ------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == 0 + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 0 + + response = context_client.ListLinks(Empty()) + assert len(response.links) == 0 diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py new file mode 100644 index 000000000..58d87dada --- /dev/null +++ b/src/tests/ecoc22/tests/test_functional_create_service.py @@ -0,0 +1,89 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from common.Settings import get_setting +from common.tools.grpc.Tools import grpc_message_to_json_string +from compute.tests.mock_osm.MockOSM import MockOSM +from context.client.ContextClient import ContextClient +from context.proto.context_pb2 import ContextId, Empty +from .Objects import ( + CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, WIM_PASSWORD, WIM_SERVICE_CONNECTION_POINTS, + WIM_SERVICE_TYPE, WIM_USERNAME) + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) + + +def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure links are created ------------------------------------------------------------------- + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == 
len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 + + +def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- Create Service --------------------------------------------------------------------------------------------- + service_uuid = osm_wim.create_connectivity_service(WIM_SERVICE_TYPE, WIM_SERVICE_CONNECTION_POINTS) + osm_wim.get_connectivity_service_status(service_uuid) + + +def test_scenario_service_created(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is created ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 # L3NM + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + assert len(response.connections) == 2 # 2 connections per service (primary + backup) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py new 
file mode 100644 index 000000000..51e91a596 --- /dev/null +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -0,0 +1,134 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from common.DeviceTypes import DeviceTypeEnum +from common.Settings import get_setting +from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.Service import json_service_id +from common.tools.grpc.Tools import grpc_message_to_json_string +from compute.tests.mock_osm.MockOSM import MockOSM +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from context.proto.context_pb2 import ContextId, Empty +from .Objects import ( + CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, + WIM_PASSWORD, WIM_USERNAME) + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value +DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM.value + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + 
yield _client + _client.close() + + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) + + +def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is created ------------------------------------------------------------------ + response = context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 2 # L3NM + TAPI + for service in response.services: + service_id = service.service_id + response = context_client.ListConnections(service_id) + LOGGER.info(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + assert len(response.connections) == 1 # one connection per service + + +def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # pylint: disable=redefined-outer-name + # ----- Start the EventsCollector ---------------------------------------------------------------------------------- + events_collector = EventsCollector(context_client, log_events_received=True) + events_collector.start() + + # ----- Delete Service --------------------------------------------------------------------------------------------- + 
response = context_client.ListServiceIds(ContextId(**CONTEXT_ID)) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.service_ids), grpc_message_to_json_string(response))) + assert len(response.service_ids) == 2 # L3NM + TAPI + service_uuids = set() + for service_id in response.service_ids: + service_uuid = service_id.service_uuid.uuid + if service_uuid.endswith(':optical'): continue + service_uuids.add(service_uuid) + osm_wim.conn_info[service_uuid] = {} + + assert len(service_uuids) == 1 # assume a single service has been created + service_uuid = set(service_uuids).pop() + + osm_wim.delete_connectivity_service(service_uuid) + + # ----- Validate collected events ---------------------------------------------------------------------------------- + packet_connection_uuid = '{:s}:{:s}'.format(service_uuid, DEVTYPE_EMU_PR) + optical_connection_uuid = '{:s}:optical:{:s}'.format(service_uuid, DEVTYPE_EMU_OLS) + optical_service_uuid = '{:s}:optical'.format(service_uuid) + + expected_events = [ + ('ConnectionEvent', EVENT_REMOVE, json_connection_id(packet_connection_uuid)), + ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R1_UUID)), + ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_R3_UUID)), + ('ServiceEvent', EVENT_REMOVE, json_service_id(service_uuid, context_id=CONTEXT_ID)), + ('ConnectionEvent', EVENT_REMOVE, json_connection_id(optical_connection_uuid)), + ('DeviceEvent', EVENT_UPDATE, json_device_id(DEVICE_O1_UUID)), + ('ServiceEvent', EVENT_REMOVE, json_service_id(optical_service_uuid, context_id=CONTEXT_ID)), + ] + check_events(events_collector, expected_events) + + # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- + events_collector.stop() + + +def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure service is removed ------------------------------------------------------------------ + response = 
context_client.ListContexts(Empty()) + assert len(response.contexts) == len(CONTEXTS) + + response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) + assert len(response.topologies) == len(TOPOLOGIES) + + response = context_client.ListDevices(Empty()) + assert len(response.devices) == len(DEVICES) + + response = context_client.ListLinks(Empty()) + assert len(response.links) == len(LINKS) + + response = context_client.ListServices(ContextId(**CONTEXT_ID)) + assert len(response.services) == 0 diff --git a/src/tests/oeccpsc22/tests/Tools.py b/src/tests/oeccpsc22/tests/Tools.py index a782b6bb3..d26c8ae11 100644 --- a/src/tests/oeccpsc22/tests/Tools.py +++ b/src/tests/oeccpsc22/tests/Tools.py @@ -12,7 +12,7 @@ def json_endpoint_ids(device_id : Dict, endpoint_descriptors : List[Tuple[str, s def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: return '{:s}/{:s}=={:s}/{:s}'.format( a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'], - a_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) + z_endpoint_id['device_id']['device_uuid']['uuid'], z_endpoint_id['endpoint_uuid']['uuid']) def compose_service_endpoint_id(endpoint_id): device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] diff --git a/src/tests/ofc22/README.md b/src/tests/ofc22/README.md index 07fd4f72f..bfc06bf0e 100644 --- a/src/tests/ofc22/README.md +++ b/src/tests/ofc22/README.md @@ -22,7 +22,7 @@ __Important:__ - The `./ofc22/deploy_in_kubernetes.sh` assumes you have installed the appropriate development dependencies using the `install_development_dependencies.sh` script. - Before running the scripts in this folder, remember to update the environment variable K8S_HOSTNAME to point to the Kubernetes node you will be using as described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance). 
-For your convenience, the configuration s sript `./ofc22/deploy_in_kubernetes.sh` has been already defined. The script will take some minutes to download the dependencies, build the micro-services, deploy them, and leave them ready for operation. The deployment will finish with a report of the items that have been created. +For your convenience, the configuration script `./ofc22/deploy_in_kubernetes.sh` has been already defined. The script will take some minutes to download the dependencies, build the micro-services, deploy them, and leave them ready for operation. The deployment will finish with a report of the items that have been created. ## Access to the WebUI and Dashboard When the deployment completes, you can connect to the TeraFlow OS WebUI and Dashboards as described in [Wiki: Using the WebUI](https://gitlab.com/teraflow-h2020/controller/-/wikis/Using-the-WebUI), or directly navigating to `http://[your-node-ip]:30800` for the WebUI and `http://[your-node-ip]:30300` for the Grafana Dashboard. 
diff --git a/src/webui/service/static/topology.js b/src/webui/service/static/topology.js index dd58388cd..92a3c70ec 100644 --- a/src/webui/service/static/topology.js +++ b/src/webui/service/static/topology.js @@ -15,6 +15,15 @@ // Based on: // https://www.d3-graph-gallery.com/graph/network_basic.html // https://bl.ocks.org/steveharoz/8c3e2524079a8c440df60c1ab72b5d03 +// https://www.d3indepth.com/zoom-and-pan/ + +// Pan & Zoom does not work; to be reviewed +// +// +// +// +// +// // set the dimensions and margins of the graph const margin = {top: 5, right: 5, bottom: 5, left: 5}; @@ -22,16 +31,24 @@ const margin = {top: 5, right: 5, bottom: 5, left: 5}; const icon_width = 40; const icon_height = 40; -width = 800 - margin.left - margin.right; -height = 500 - margin.top - margin.bottom; +width = 1000 - margin.left - margin.right; +height = 600 - margin.top - margin.bottom; + +//function handleZoom(e) { +// console.dir(e); +// d3.select('svg g').attr('transform', e.transform); +//} +//let zoom = d3.zoom().scaleExtent([0.01, 10]).translateExtent([[0, 0], [width, height]]).on('zoom', handleZoom); // append the svg object to the body of the page const svg = d3.select('#topology') .append('svg') .attr('width', width + margin.left + margin.right) .attr('height', height + margin.top + margin.bottom) + //.call(zoom) .append('g') - .attr('transform', `translate(${margin.left}, ${margin.top})`); + .attr('transform', `translate(${margin.left}, ${margin.top})`) + ; // svg objects var link, node; @@ -146,3 +163,25 @@ d3.select(window).on("resize", function(){ height = +svg.node().getBoundingClientRect().height; simulation.alpha(1).restart(); }); + +///******************** UI ACTIONS *******************/ +// +//function resetZoom() { +// d3.select('svg').transition().call(zoom.scaleTo, 1.0); +//} +//function zoomIn() { +// d3.select('svg').transition().call(zoom.scaleBy, 2.0); +//} +//function zoomOut() { +// d3.select('svg').transition().call(zoom.scaleBy, 0.5); +//} +// 
+//function center() { +// d3.select('svg').transition().call(zoom.translateTo, 0.5 * width, 0.5 * height); +//} +//function panLeft() { +// d3.select('svg').transition().call(zoom.translateBy, -50, 0); +//} +//function panRight() { +// d3.select('svg').transition().call(zoom.translateBy, 50, 0); +//} diff --git a/src/webui/service/static/topology_icons/Acknowledgements.txt b/src/webui/service/static/topology_icons/Acknowledgements.txt index c646efdec..6329f8233 100644 --- a/src/webui/service/static/topology_icons/Acknowledgements.txt +++ b/src/webui/service/static/topology_icons/Acknowledgements.txt @@ -10,3 +10,6 @@ https://symbols.getvecta.com/stencil_241/224_router.be30fb87e7.png => emu-packet https://symbols.getvecta.com/stencil_240/269_virtual-layer-switch.ed10fdede6.png => optical-line-system.png https://symbols.getvecta.com/stencil_241/281_virtual-layer-switch.29420aff2f.png => emu-optical-line-system.png + +https://symbols.getvecta.com/stencil_240/102_ibm-tower.2cc133f3d0.png => datacenter.png +https://symbols.getvecta.com/stencil_241/133_ibm-tower.995c44696c.png => emu-datacenter.png diff --git a/src/webui/service/static/topology_icons/datacenter.png b/src/webui/service/static/topology_icons/datacenter.png new file mode 100644 index 0000000000000000000000000000000000000000..33818cf87e0f47fb6fd45b45c46f368f62ab78d2 GIT binary patch literal 9417 zcmdUVWmr`2+Aqw|NJ~gajv^g`G)PDaf-np{N=k{elB0rzC?EqO5~4^AFm%IGlok|Z z6s44s6anct5BTqW_Sx6_Uf2F`KJa0!Yt4Fi?)b(1tmq4d+B8(`R76BXG`c#PCPYNU zA_spIWZ=&Akb8baL_9>gnrBS|Z5Pt1lWfdFTKBhm_*d$rQ&UM=!g*U=M98Q_jX$FV zOx|_cYhE}`I9+p^G_#`6--6_YWw+~Ae3GMa42G)hy{EDK`(}?yz*_Bey_@53YWL=zbLMx&_3Aa{`}@W(Q&MPMitf`V z+z*nMqoul*rJR!AEmKh2eT*@A$+=b;Tuy zocD203I+0rFq0M^E$7BHC$#yb`pchOlk?=-C@`6Q;HggtlYwqDXT32hk(MnjT3mfEFA!rea1#p(5q+53C7 zf@W*{=g^obCREn7Vh77Pqp0>uK9i)^VM-g%g1;2E*%U?kxqgF;Z9K|;l=(c%=QX=h zM8fir97D=Jt^T}HHn&1txajAg%>DK-8h8IvDnec(cNpy# zH!tn43+4o#U~7+sU&>!rE$5vT!|yhe4LjC7c+T6JXnBd!M~s$kKB1qj8$&ODS-urS*M987Fn@F4 
z7p!Zi*D*RSl$S&G&iB!bQ~5WgZ(6$5F12n?hJ}{e+{+j-M9X)^LEa7%4vrNB?bvoW zpHzvL{EFX~?zzQ;(fJ#dWKYM0vc0mN)*4q?`(;$fvf3o@neThu+UW{OlPVOz56K&$ zYgfwL5k-}?+)KO+@?~C_TszKA$hEC-oBud*m-YR9&N+uYA1U6>xWbF&=Sa!Ys%L zUteypSl(?`DWX8O@IUqb1lEmQs6TP{7Ua!JgPH8f z5;y6(0O5QKm9A~pGI`cp_|Iu9r)Tch&3jRZ1e{_(EjFCS*Qsmnl~zo|1nnC&1^1r2 z<90be-wU#?TYV_`#|JoGue7Cpu%+Q%D+BC2(8&?vfj(8=3rj{Tv&6=#BBqh}4 z8Kl!x{bMlREo+FJyvcWqyDOJPT#&s}TJes<4;i<~fm;tU7ZCU%gbq4l$;8d0i6huf z(@&7pZB9TOPX3)+y-h~!Cga@Lgm=a> z`&*66O9a*eb|H)z_Pm*>Cfdxgp9Gb9GF^o9S<-t9JZyjTe)~^4^U;#KApQy%=C@r# zMY0^;y(!drQ(vzIK#$cnR|CQrU8L_l8WA%pwbJ0}6%sW-lj2MJie!oPyxy@X;`h#o zcOLt&E3p*Od{6`FP(h!5(a(BKhq$Pt!>$)|9>)$~;MJ;CW@1gdUm&ZO$B^hQAu0+H z2s~v@&CPH3QIAAWJ35y(IB4dwe8`LW(s#pq8_3#?sxj#6kuDpEgk=nYA$`cAL5Z2P zb0J}gr~EHmMH4R#{c>WY}a6#Q)U~ zg%K?jV~{#}Kwy7&b51-stMhVDLGqYWH&z zn*1(rJhE2D-zqzM01-ESl80Q2WPV@T*~NqkPidf~mJp3(w7v7+i|l_RTmQejGnxh! zVzVF>aE?9CK$neN)^ijQL`!?;l<_kJ{;|ajjvhmbx&kL4!u)X}-|Z+&p9|cD=UV%u zu({OUU_cc=_BFo)!5S)Ha})NCn-(N2Xb-`TuUI$7mZ}u>0l}k8s2-dzG75s>DqykF zBGAARV-Nmi(6=B`7`0A26)y1@;U1Tw!^+ef=+Ufcc9M?9gS^Od39Umx8V__d?AP|Y zM@{k}j=2+xC8FyR57f{+`uD$3akK>`B_5yr>$3W_c!rJfyl z5eqGgzuNj(?mF*g1Jlo!WQ*&K@|Nh-$bYJzM73vD)cQltiVHK@t>+v85JFEFuXpcK zkyW)}vbY^B?v(Jc4f+STv_T8qZwg$GQ?#ogQR_bv5r$g8%su+-=D* zwbJWa2Tt&e#_ar?42$uuS~y9vkG=-00G^22jRyCkk=$_+TMg&W0ZAf3vaM&oV?>3Q zfTUO+L4ZY{94sn}CPtk4@fI*}^92wLCfH$nfvdL%R~-0{MZ^*VnA_`&3KH*D7)e;@ zK?OWS!YMIk@CUhp%-{ymf4re~0gWiI(ikzqG7cDfJ@sv}*U)|u*S9b=q8S!&_P8&( z3MYg9&af5V$dbe)6CLjMP#Peg%_PmI2t1M992ctLjAZ>qdK7er|1xMKHcn@Be)8H) zj;GX$vP&bc`fM-|?BEq30b^MZ3~4M->gUF0Ra5KNP&3vBS)R_I~Q48%!$s1s2 zc?SVJMNCN0jtf{novuO?-)hOQuG8u3;o?bL6`80+G(aWNMzizLmW-kUa~twTXODyMpz~-@#oT6Lfexkw)onfr z6TO#mYUqe-;_RXr(hXNQ%Y{TnIxbLp)DaL&iWg7Re_4Ix0Fz#I_bG>mJlG zShXVjL|QnVnGa7STm*RsbFoOg($te{DcJuA0$?wU=Tt2?&X#S?C{uLa)XLJ-E#+Bv z(4L)J?z2w{(#^4Y5yw|r7wR2)UwnD%w@fP6;_Z}Et`2sX!;C^m<8FA4nfDHJQ;3YU zFqGbgU`Gs(;2B4D|B{J1BIHH=LFDOzEYb6Q^{RiS&z%(6A&EPmCfo5y#w}dN5wX3t z@PJtuXejJH4K(qdC1x@(@^vUmO@~&1BpJ@cl>!)2!vnGm>^6}13+290+u*Rj6om^O({=ud 
z%2Obxz|961Oqu|4ucASz>FO!@h68LkdaxRq$Oq}e;15-V#x~%CGRr+q{sk^s>Sc&P zx|VF!$xqy{y$Qi!Ww5Rv7U~D-!dq$Vwi%*{k69gTjrX4^vYVkj^3F&Xz~Ggl>_=A8 z-xGs+cSeR1cbiS&O8&=NZWhJH#Y3>{GMMn$VfNPf^5Ha>41?j*G+sfo0!rUh{6@?z zr7@%qiE?Pf>WC(cdc#Xo1eD#aIV#XqrC*P$p^3TgMIk_5@u1ih@6poHbwQBYQo?g~ z%}OKg?}WF5{Q37XvN@h(XEyR{IFh7OAgZP=t)j6EDmi!3poXq+ufi||$0SUP*fDbpo9TXs;_{p&Mszn#or}kByf^t?idBj;x9{kN3 zvouFa5y%;oWbXms{7XY+?lf#ecx$imT}bB?)dmB+i(nNupD19Ju3V~G%50B-Cr8UIt9ACeupv$ z_U9v|@MMg!m3c885-?k%mU5{H*KqbBqoD2FK|O~VAAtK=3@I*K$E1oI`GT=PCf!Z> z0jaUju+7_;=YRy2U$t<(yr)`x0ErU^Jopz*$va7nSn(qFaf%b4258tLN@D}6Oav!` z|42vuxAX}9P1*wa@VCzRX9h(4-G~hQc)m2=rZre?AivMx>1WVub3L=ZeZczLC&|5X*E2@X*8*54&IatSsfMky z>^HD;7=OiJ^Xfjanmn&T9f)GJbCZtZ2==%wl~O%Nibn-C58##4SMGS+Q*y&AD);L_zlc$ z>;wYq|4Uq!&xGA*yFB99=Zo4UT!Ofb}~ z^m7Vj#Zonva#Vj$R3^Wp@u7ni>^>FSL&bm!#~wD%Y;B`~@+jc@-(kq>ayWMBAXC2+ zOEEQCrZTC~SYhS#%nnUIW%HSYKjmRp=6M7I2`q2CJ`T;!vg8H$v>opY=_( zy1+9>t`Yh9PKPcs5A-2J8gGl~qBmKU03=>M+)}D1jkJ@4_NTX~w>iAjSwAd{OY(qP zwCsnF#wN2MmZK_wUk`~#0r$duy>nh4>ENutU;{;+Ish;F57`5tXPbd|%eYi(1*cC0 z+GrBeY(oGwa)ATSa$NeNVfrY!shIensuG_tcVM4|GD|@ZF|1G_)3x>}hP1}xf20b! zeoFqosOEnSWb&cQ(h{1gi2ProyLCRueo=WPXwf<1d*b}eqVv3%cEoosF>Qu&OC%bR z$4x-_RxQ#^b-X{{e;Iq4u5O!^^*5fW_$!wAk&5_-<<*e?mHv~*GZtG~TBIZ-=!2i- zlNm4cw{2hYThl4v|C}X0ns_cXW0vKVJmL*%BR zyD>3+VePfL0h(BHF#NSAO?W=7Ktsw~Omqg27vbRtG?+}!c9{<`Pq^?qg#gu0aYQl! 
zIH6fpe?VIg0f`j?Nm+pDxYz&?ih@TFk1VnTcqc+kII5w`HJsL7etT~Lj2+{3FJrLy zzjcc~`p3wt|0`GMYaDU~7soHI=x0bOkvvdL{$yZL0YQAY*-_n2e>XvtdgXJUGv}Po zCg}yR4haqmckX1kQYhYiH3Q(254664D=qFtiX-9WYeadrwUKcDjgPAu)Bms^2T-r> z&BKf5eE6*fAU&HN0QT1x)Is;LA9UaUpy>ZDlS_$Xckx6QxpV?o%72%?08XyR#M7f> zB(BGpALwBe<~3Ppc;Od>`xxtp#;7noN=+S!F2!(LuDR0gCDo!chKpban^=QW+nu|yCPi0*NE1Y2a(-)T|K6%1B z^Zk4E90z-Oc{XR10XMF@cWTZu)c^g+>vJLgC#Ci2QD*FBEw2FtxdTy^iReDPls1rp zF*>f4dLm@AoS#`*^49J_j+NFZV=NRof| zEFRG1pkt6JIpc`_H%PM=>`6`S&>|l;i@#gmA3i1%K@#~;2c-2Uw<3t>fxkH7*8lu` znbR9~^m#u4$!d~RjwiX5F|IwkX@ta03#sO`%-m>-CjF7$^tyC^;4fbK5_G4W_f-jC zL&_O~M6$KLk8Voj&BCRXYSK!-RxuZ#mD&%qk|wd6<>!sYXl&h?iV9+}EpcGty1-mR zDxVc?v2EeE7UL>q*Ppn_2X3eT#`Ti8knOdO7-nJEUDIgS?;^8(@wFD8D&jk;6D7a< zjr0nIUvgwc;9n|yvjtu!;~*Czm!4a-+&wl3)OhQb1@nuOC#TI%DJk>Z#}lx0 zN;A0Q)MnG**Xc%;)-e!lsEU|cP&fL{fiaFy2g`j7pS%%CdH`CsGq%5QOco8;4_yGK z{EqNNXHNtAzpsGrdQ`ZHQ_vD@Ykb z8t4izXAbXAvyl9>cL&8{xG5{uf1l`_p)NhJm^_{XP8MLeFbE{4^uIu;D?EAbr0RDS z2@%$zK-K@na3Pu=#n0!RWa6rQ#3Qwj7YtN)f%607pwXsp>E2suc8apgpSeE^v5xL} zjfrk9S&i^~2wgJL_j~>}RWIKh#%h2MhhQ(Oe}~}50d9h%gmJ6Mub*$i-L?l<_1Y?) zdt22QYxW@Zl;_YZOvb?OUOoZl9IiaqXmL5QldGiVZO$kne2@I4d%@%4qmqM!v}4H_ z!YvuR;94OdNeU3m3U-}f5`sW^p^3JbnO+eNbH4Pz9K6y#GzVWf@SSgpkykh38fIi{ z%CbswAM=C5y3wT?YE}z?gf1B=3qFQVHzyQjPhD#7f{N{$ zeRm95Szg$O9jWszZKxiTiWwTUYn!WRP3Oah+cZu=?7ArfC_UHO7F-2Lgrifs; ztn$K_lJ0f$oTjETEpRNG6dGvpAbqQUdK0Tt(dF7wd2^95I-DIxAma@J0;eUFCE5>outZ zJ$2Df{L;fgvsC?K85i~5TtjQyB!jX+ff?7%L-D_lz|6O?^yxOK`fS}fM-U~kNhVV# zT$(z;M2@&V;&8TifMJRG4=+We+F)ymAC(JtWdQfZ?E#wka5|=%_}odi(m%#^A-=mv zwdjg&sv+%E{=nwC(;3z#&!Hy$HBL_ds)d1We3ebelV{X*(JcNB58Zhl+k~zjy-h?! za{J(0fJ>4nd_x0B%awoa82Q-e*UrDtyUk>+shYUR8)3sv~in+G5^76lhRyNe5Vi4}N2JQvc& z&3{JpR1Px{aMHPUR>CD={M`9LXIV`5e`yl5TaD+TTaPlOJC;;I(Os1 zv02{B*N2K$qTkjD$-QksY!Q)Q3ImC}WKEL`X1Dvl#y(>saH;i9#qa~aiqV6-mFg|? 
zlj=@)%RK<63gHzXaY`+K>v;so6Dgq^?u9!`te?m1&6zFR@n`MAaTGWrr%Bez--Naf z9NVQ&d;f0dmb0BN@_s~tPZlTMP}Nh`cNlBI&#`JYz3re6!hPqr$u`CWt^A|j26Wxk zcRGd2((QxTv~U11+P@0-s@@kHb;~(Lt%5}n~#8hc)WeGY}MoHV~J$%x0koaUHJL&Cw`c_g+xRJ8*9b{-&mR% z?YQH4Lv8K20;h)pZZQr%pdPz$|7qM^NNBu-aN|)Fjxm8QX>ZewZr4O?`^8hKf=w-n zPd5F{v$8I!gUp|~wbM8-W?#Xh&!dL;`KD?HKDs&hnyUO9&IH@?jFV+Ru7ARGWXaUm zr_-fG$tlAMA#_pY^`vO|ni2DF+l%l!6F0Aph{Kyl7_M=OaFdf4?~d^SqMJI;Hv4u@ zBZn>py=~uyVMNJmfYZ{_)73FLR{3q27P}V*>wK#1=Pbl}Xsv&+h6!9vw?8rdpy}h@ z$Egbt07|~>%02bPu^VdJHF8{e-=H^o`8lg?Met}cZ&q!uJ}T%NlHI? zHLU;(3QJpDUzsw%xCrIwnyC4X z-JR<>@74Ocqg;XG!{v253_B=hRm!&El7(^%b*q@(c_?F;)2nfh)yOLd{aBL{0k3|y dvW|VaS^5zy!R~7>_(X|FSIbbdSRH-y{{W^F_BQ|k literal 0 HcmV?d00001 diff --git a/src/webui/service/static/topology_icons/emu-datacenter.png b/src/webui/service/static/topology_icons/emu-datacenter.png new file mode 100644 index 0000000000000000000000000000000000000000..ed2cc7376b481815edb48fb6faaa025289cfc3ca GIT binary patch literal 7690 zcmeHMdo+}L+kTiF+A(Z}oU-jw+J@5P6j4!jkwZ2HVJ0-}T?c`VQKyYKtDuKT+0UyP-> ziRcfqKR^&9diq!6a}dOb;QoXK!IPk{+x`%w(01DR_;1&or!&IxoovXBL-C=ayMV5wB4sbwXC^k)WVw!7GM zIpYTEuNC|L6zTnpoRRL-G=8Ux7;&(~m*MG#3>l=)94=W`js1ul6s&fz^YedSd0Q`X z?lRKR3p?9dmhdG{O2}M2_T!*~!-VB-Nc925NH_<-+^7A(xh-i690C6G(UxiR&_qZz z@pmKPEbWpAZRDZXdlHQYzRqT=tnIhz+dhPd&^h$GPNa7w*$LU7G#fgm$DVIK;P%$2 z>YfgzS-raZVY2fP>(>+6D(2R`M%4^Hf@afDwD<|#ydzKR(C?jp!Nb#FE^&CUaNQ}~WaDUXENXjs2F_&29wf?QEuD7G(?m*yB*nR_& z)^@$Qm4((m6T@$9JqA%SwoxLGt-wOfL#4<8dVZ*ppIL<5)4H{)4Db4_?2$x^;Iv0q z;0o8KikhYpvsNzV3#rCsqvW@LUTA|u`O$SUN|ANQlAH7?%36YYSFV(r!^O$2r}$?k zq30Fg`KY4$7X1Y}@sV>7OS1Z7tm;&>+?Db5^}N{Zl-yxtb+;hZg6WciL0C^UBcGB$)A7rrvAxWvpKyfz#!82rMdW11@CmX2W= z3UDxzCn)U}hGDAzgluaIVfn$U{e@-X^;fZMBP1tnO>r<}wzjq|Y$$lL;N{VK%RAtq z6V_S9Hx0aE^vgQK^mLPlx`0z46_rtxtW+q;w11<)A>f^?>_j{`3 zyFr1cwdiClVQ0DLXD_!V=i~`05^RzeX3JodiN%sG2jTnrSmd0Ymu}j(cgfE$ z4y~~(wukp&oOGYq)o~NU+)s(_SatcYolnCrDK5XFvMUGo4n4Si#DiH;1LX~tLbjse zEdd>m`dz^xEIQ}*AJ(vguZ-+7EZUu*@AH1x<=O`UM+*A*7cEhO4c-IF%g&N7d*Y5B 
zWw9Q$N*r0W2pSpvefRBgwdYN{2$H_Pa&)QhR%9)GJ04vRQd7y8e#Jnx>LZ!L5EG>p zuf~sZb<|WDc^u#VjC8l*cKc=52d}q zUAWdeNnPX2;!Ts)6?A;8opVvU8f1=0?^M`Bjdj?w6(U`=>d)Tu zt3J)Q%>tDzDfj5iT{~2mY;|_39fo*OBVKQh4XC!f?#D0xY%~=&mFKdWi`$cv5s+GN zzOuNEGFD zlisS{r*X1i$nvAE~eEk0{9W3o_Euxn=Egkw}oKg39d3kXOY(f2|Pe$|Pi~ zz|^4S(i+&MFGN7OQZO+=lxPr%k1h@sBp^(AsejE>g&P=t>jE!miWn~;Yz#|bCsksB z50#BkON8auz+k8g2LJYCiBTm)ab$V)|M!o@@J-$^f)_n`vcJe?#91hOZ6T}ADR6+L zHdF{lcVaL7BN|``CxS91A&MOFwKkrTm0iRnRlDAg4RN(BC{JrF^|CNJ#-NGd&+c*S z1NGfo9@{?&QRk0zwHyI}l!8ei5p8c{eF!#fR8kTQp+VV6iFQMQ{#7wACTSrngrc1p z#uXOuBQ(gozo^>gQzS0vPkR$iF4g$*|9y2|5|TVbCqXZ|q-I8cvs(zYt1C4RV53U| z{V>?{8i04Vy<3JdTdHybqMlR0*@{t&>s+a4%EaOtLNruz2BBqBA zBLR~Tcka(X2#u_rlye!%7e!4)-s~4bnGehU0z+lDX`o{mO09H0LX!PYbV{xOKP7iL zIYbliPYJvEtosD>f5V|JXzT4BLtxypXGC|PV}hCpNfT=KyBRI7Dhuu-tzZa?4sh7s zo2*YbX%EO&`rM$_De$I24YUSLLbO0d2qh`%X|GF_fW`5tcG{mPO3V18B)XIQczNIb zeG@w=&%~OCJq$E!lQ;J5WJXMt zwk1G}0?anIB9falAyh_;EIKIrHeZ|!smUfnlsX^M)p!OZ$Ag=(PwD|BNFOHU$aYO6 zFNs$jZ!~p=dR#M2v5-Pkb=?u5=Dv7&D-A-)+@xh!?}Ag`9mQD-fJ&(`fjC|OD2Q$_ z58NE|gql)?=I?(FI{!Bg$QWL}`egMB zN3Cg&^`XV+w%TX}X!Coq{adJSbmXN57QG?TjD6ZKknP~*B7m3eAmf^dfU3C7LF2bp z{v~LD)CMonO~nTFad*dpZe{i?|qx&_`;<4U~%dWnM-obroocd-vaSx^duSi%F9n`j==YQf?J zmvvVaN&f=OgW!6RziqT!IViwfC0TAl0nnLOVVlvf+G&88`3n%}6k{VKXj*iBnF%oD#m7Vam2QyJLG#dt zJUn!uNpP{@wpDMtKmHy7pdJ7qerp3r8l1}t>b$W6SAG-n)?^8+rNa0mkem)K?%SQX zNXy6g@Abg)lK8P)Fd=WbV~%t%3xV=`s4gn5E+unuc#|oC`%cEn64O~^@skA)}aBn5?Eth=# zH(6ITvt$P)MiUzzXC9>etq<8y|8(UYn-el9;iI^j8QhM<*$f0;>!bq%S8^=63bvtH z{-isan}u3g>!vdtHk^-H9D+dm=3pv%BPymW^r%!eh`fYQ=Q2orGHUWWKA1pAoWPw8+ z3ZP4H%Uu!)!1!m)LKNwakaZy-$&oU)Wyb*4HEFrb$=3zg9L@+<&*S2TJOJfa<~ENO ziGaNmgU9$b(laO#3tEvp8nA-Eis7}oU3ROQQ1h&loGKX;^8|pxBd40%x_O`gQq0Yl zm*_6Lg(n}>Fb4LNac3hbC{J)Z@(Mrtk~}ztg5b`5tR5XHm+$`R_nI{yY!yKuqHDK-aXJ z{5S*#23Fu#K_^t zssDyb^E%Sn>2WUkW@z)zQY}Qm{P+68Zx(?6e>eATsDFz@?4RX;+YT$mHXC@Zh51Xz z{=Zom8a1~nam_c2lRf}8K&uZz8Fcvm*pGo^(OOBa0W#w?ZLYJ`Uo>CJr3Qb9lJ*>| z38Dq1yXYXUi6wcA!u0 
z7lC|l=##!C#kRoVlC`z9y5aiDm|-7SC>c1ay_Lui05kFTsi9R8CG8Sg6t+6)qUh## z3@4vl?hcXAw)^A%ua38Xi+&qteu77YN;BEg1(5y5}1wIDsby(>Y3Eg5f_!*R z!&v}DOEVq{TXNESPk>DY)};kmbMO(IzRX9nl~pRSdFcaLUxC*;B@aIHz1xHk%iP<3 z4jI3HQ_x_MD71^R-9rCrW9F?COj1(-TqX9SjoZ?`=U`O)@%yM)Db4##ILE8czssp0 z{SxEL%9k9&G9$60rRz=F(kF`!BTb9BPH*`4|K;?=DC(6X5{}P9m#O=lBUTp!05)2> zmG-Q)!|7sfTbup| z72gNhv59g)|HN^>pX{9clbnKBNvq2gtKYts^CMp5pOfX~m^;s9siKq8jxY7$>k14K zN=}!iJ0;=ljJD0w3nTHWR0%rR1#C#e7(eQPR?J|;`YM~;8j5AMQLIWYbYsB`A=W9w z{I)n*uwvAZ*hs8I(dIoR)J$46(kF)a2+F}bVcdZg7=?sd<(jGD<|zPah4ICT=Zgh{ ztk9C9Ihnz^1sQOb@8W8}9;FVxsmcZ8xj#EmA&t44u5h9K6=D|2r3mv2tL!yoC zG!Jv(2XB}8{`Rb`sKK+@thsI#2bWi1&2@<9yElXe{4Mir_9bbZ=wXoy(Ew!MWHvx{ zlk060_?dRk=OsCdPa}@9L1qZBPrEBT7-J39xSO7fl>$Wh+7Xe(Bq_6vU2-<7%d8aO(T zD3G2kQB-@~6$7n3TCt7t^XtGK4}z@$SV6k++XiKr1qm^A#G1StufEO!K*TFWMrf2g zJ)2znxlxXN({Saur$A64e5$O=nN1W6MOV)>T#?97M?Y}yy~sxz#|vYk$Qk=dO86p} z#nHvP3RhqL=o(LO)p6|^k>W$Vs1mOqO6yrq>DhHc<{e27+?VX@z9D9?Q$b*<%tS30 z0imu(DMjvMOsU-8@@b3C+(h2fUO--VR)kwfw@RvkCqeU3lTxH3!}D}qX#1nHwMeXf z;xN^KGv%tt>24~Qv#cCmO>wU&v zufCh0d811yGK}t-L1s)QpTG7YDqwZ7OFLOStK%_@C6c%0t?pQQJEz~L>l;nXP~ThH zw(_fUpA4$FaA)67bEo6=yx*PP*Hh&` zIn^NT(*LbzEGy#qfHzGP7Df*Dy#_RsP0;)@*XJ#vEJQg4!&PfvXC=>fyC$670b`8L z919wrB)h?9q}AO}a@to)t<7~G)^QS~P8JwV@lmH626(z6G847+RIKK-Pc9DbkfQ*BuuArIKAW#J_NGQ=M=bal$%|MJB9O@}x$@d~{OR?~tu9t5#{>sN}ZV z%Q3Jx`qa2uSZ$6@9AGKXd zg?5=qR7<8N7w8ak+@ Date: Wed, 27 Jul 2022 11:51:11 +0200 Subject: [PATCH 05/91] Arranged deploy_in_kubernetes.sh script --- deploy_in_kubernetes.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy_in_kubernetes.sh b/deploy_in_kubernetes.sh index b6e0c8c1c..a5e227cd4 100755 --- a/deploy_in_kubernetes.sh +++ b/deploy_in_kubernetes.sh @@ -81,7 +81,7 @@ for COMPONENT in $COMPONENTS; do if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" else - docker build -t "$IMAGE_NAME" -f 
./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG" + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile . > "$BUILD_LOG" fi if [ -n "$REGISTRY_IMAGE" ]; then -- GitLab From 213c0476ae799234115d316a809e3739f339ece5 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Wed, 27 Jul 2022 18:15:20 +0200 Subject: [PATCH 06/91] ECOC'22 functional test: - removed redundant scripts and files - added missing show_logs_slice.sh script - partially arranged tutorial page --- scripts/show_logs_slice.sh | 27 +++++++ src/tests/ecoc22/expose_services.yaml | 112 -------------------------- src/tests/ecoc22/show_logs_compute.sh | 17 ---- src/tests/ecoc22/show_logs_context.sh | 17 ---- src/tests/ecoc22/show_logs_device.sh | 17 ---- src/tests/ecoc22/show_logs_service.sh | 17 ---- src/tests/ecoc22/show_logs_slice.sh | 17 ---- src/tests/ecoc22/show_logs_webui.sh | 17 ---- tutorial/2-0-run-experiments.md | 2 +- tutorial/2-4-ecoc22.md | 112 +++++++++++++++++++++++++- 10 files changed, 139 insertions(+), 216 deletions(-) create mode 100755 scripts/show_logs_slice.sh delete mode 100644 src/tests/ecoc22/expose_services.yaml delete mode 100755 src/tests/ecoc22/show_logs_compute.sh delete mode 100755 src/tests/ecoc22/show_logs_context.sh delete mode 100755 src/tests/ecoc22/show_logs_device.sh delete mode 100755 src/tests/ecoc22/show_logs_service.sh delete mode 100755 src/tests/ecoc22/show_logs_slice.sh delete mode 100755 src/tests/ecoc22/show_logs_webui.sh diff --git a/scripts/show_logs_slice.sh b/scripts/show_logs_slice.sh new file mode 100755 index 000000000..7c699ce92 --- /dev/null +++ b/scripts/show_logs_slice.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/sliceservice diff --git a/src/tests/ecoc22/expose_services.yaml b/src/tests/ecoc22/expose_services.yaml deleted file mode 100644 index d51438361..000000000 --- a/src/tests/ecoc22/expose_services.yaml +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - ---- -apiVersion: v1 -kind: Service -metadata: - name: contextservice-public - labels: - app: contextservice -spec: - type: NodePort - selector: - app: contextservice - ports: - - name: grpc - protocol: TCP - port: 1010 - targetPort: 1010 - nodePort: 30101 - - name: redis - protocol: TCP - port: 6379 - targetPort: 6379 - nodePort: 30637 - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - nodePort: 31808 ---- -apiVersion: v1 -kind: Service -metadata: - name: deviceservice-public - labels: - app: deviceservice -spec: - type: NodePort - selector: - app: deviceservice - ports: - - name: grpc - protocol: TCP - port: 2020 - targetPort: 2020 - nodePort: 30202 ---- -apiVersion: v1 -kind: Service -metadata: - name: monitoringservice-public - labels: - app: monitoringservice -spec: - type: NodePort - selector: - app: monitoringservice - ports: - - name: influx - protocol: TCP - port: 8086 - targetPort: 8086 - nodePort: 30886 ---- -apiVersion: v1 -kind: Service -metadata: - name: computeservice-public -spec: - type: NodePort - selector: - app: computeservice - ports: - - name: http - protocol: TCP - port: 8080 - targetPort: 8080 - nodePort: 30808 ---- -apiVersion: v1 -kind: Service -metadata: - name: webuiservice-public - labels: - app: webuiservice -spec: - type: NodePort - selector: - app: webuiservice - ports: - - name: http - protocol: TCP - port: 8004 - targetPort: 8004 - nodePort: 30800 - - name: grafana - protocol: TCP - port: 3000 - targetPort: 3000 - nodePort: 30300 diff --git a/src/tests/ecoc22/show_logs_compute.sh b/src/tests/ecoc22/show_logs_compute.sh deleted file mode 100755 index 7d27f477d..000000000 --- a/src/tests/ecoc22/show_logs_compute.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use 
this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE logs deployment/computeservice diff --git a/src/tests/ecoc22/show_logs_context.sh b/src/tests/ecoc22/show_logs_context.sh deleted file mode 100755 index 814c486bd..000000000 --- a/src/tests/ecoc22/show_logs_context.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE logs deployment/contextservice -c server diff --git a/src/tests/ecoc22/show_logs_device.sh b/src/tests/ecoc22/show_logs_device.sh deleted file mode 100755 index 5e291e7ca..000000000 --- a/src/tests/ecoc22/show_logs_device.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE logs deployment/deviceservice diff --git a/src/tests/ecoc22/show_logs_service.sh b/src/tests/ecoc22/show_logs_service.sh deleted file mode 100755 index 0189b8c2e..000000000 --- a/src/tests/ecoc22/show_logs_service.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE logs deployment/serviceservice diff --git a/src/tests/ecoc22/show_logs_slice.sh b/src/tests/ecoc22/show_logs_slice.sh deleted file mode 100755 index b92aab8b7..000000000 --- a/src/tests/ecoc22/show_logs_slice.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE logs deployment/sliceservice diff --git a/src/tests/ecoc22/show_logs_webui.sh b/src/tests/ecoc22/show_logs_webui.sh deleted file mode 100755 index 5c6bada20..000000000 --- a/src/tests/ecoc22/show_logs_webui.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE logs deployment/webuiservice -c server diff --git a/tutorial/2-0-run-experiments.md b/tutorial/2-0-run-experiments.md index f87d00e98..82f6a56bf 100644 --- a/tutorial/2-0-run-experiments.md +++ b/tutorial/2-0-run-experiments.md @@ -8,5 +8,5 @@ commands you might need, configuring the network topology, and executing differe - [2.1. Configure the Python environment](./2-1-python-environment.md) - [2.2. OFC'22 Demo - Bootstrap devices, Monitor device Endpoints, Manage L3VPN Services](./2-2-ofc22.md) - [2.3. OECC/PSC'22 Demo (WORK IN PROGRESS)](./2-3-oeccpsc22.md) -- [2.4. 
ECOC'22 Demo (PENDING)](./2-4-ecoc22.md) +- [2.4. ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service (WORK IN PROGRESS)](./2-4-ecoc22.md) - [2.5. NFV-SDN'22 Demo (PENDING)](./2-5-nfvsdn22.md) diff --git a/tutorial/2-4-ecoc22.md b/tutorial/2-4-ecoc22.md index f752bda84..0cf285faa 100644 --- a/tutorial/2-4-ecoc22.md +++ b/tutorial/2-4-ecoc22.md @@ -1 +1,111 @@ -# 2.4. ECOC'22 Demo (PENDING) +# 2.4. ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service (WORK IN PROGRESS) + +This functional test reproduces the experimental assessment of "Experimental Demonstration of Transport Network Slicing +with SLA Using the TeraFlowSDN Controller" presented at [ECOC'22](https://www.ecoc2022.org/). + +## 2.4.1. Functional test folder + +This functional test can be found in folder `./src/tests/ecoc22/`. A convenience alias `./ecoc22/` pointing to that +folder has been defined. + +## 2.4.2. Execute with real devices + +This functional test has only been tested with emulated devices; however, if you have access to real devices, you can +modify the files `./ecoc22/tests/Objects.py` and `./ecoc22/tests/Credentials.py` to point to your devices, and map to +your network topology. + +__Important__: The device drivers operating with real devices, e.g., OpenConfigDriver, P4Driver, and TransportApiDriver, + have to be considered as experimental. The configuration and monitoring capabilities they support are + limited or partially implemented/tested. Use them with care. + + +## 2.4.3. Deployment and Dependencies + +To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN +controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the +Python environment as described in +[Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md). +Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal +you open. 
+ + +## 2.4.4. Access to the WebUI and Dashboard + +When the deployment completes, you can connect to the TeraFlowSDN WebUI and Dashboards as described in +[Tutorial: Deployment Guide > 1.4. Access TeraFlowSDN WebUI and Grafana Dashboards](./1-4-access-webui.md) + +Notes: +- the default credentials for the Grafana Dashboiard is user/pass: `admin`/`admin123+`. +- this functional test does not involve the Monitoring component, so no monitoring data is plotted in Grafana. + + +## 2.4.5. Test execution + +To execute this functional test, four main steps needs to be carried out: +1. Device bootstrapping +2. L3VPN Service creation +3. L3VPN Service removal +4. Cleanup + +Upon the execution of each test progresses, a report will be generated indicating PASSED / FAILED / SKIPPED. If there +is some error during the execution, you should see a detailed report on the error. See the troubleshooting section if +needed. + +You can check the logs of the different components using the appropriate `scripts/show_logs_[component].sh` scripts +after you execute each step. + + +### 2.4.5.1. Device bootstrapping + +This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The +expected results are: +- The devices to be added into the Topology. +- The devices to be pre-configured and initialized as ENABLED by the Automation component. +- The monitoring for the device ports (named as endpoints in TeraFlowSDN) to be activated and data collection to +automatically start. +- The links to be added to the topology. + +To run this step, you can do it from the WebUI by uploading the file `./ecoc22/tests/descriptors_emulated.json` that +contains the descriptors of the contexts, topologies, devices, and links, or by executing the +`./ecoc22/run_test_01_bootstrap.sh` script. + +When the bootstrapping finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data +being plotted and updated every 5 seconds (by default). 
Given that there is no service configured, you should see a +0-valued flat plot. + +In the WebUI, select the "admin" Context. Then, in the "Devices" tab you should see that 5 different emulated devices +have been created and activated: 4 packet routers, and 1 optical line system controller. Besides, in the "Services" tab +you should see that there is no service created. Note here that the emulated devices produce synthetic +randomly-generated data and do not care about the services configured. + + +### 2.4.5.2. L3VPN Service creation + +This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance. + +To run this step, execute the `./ecoc22/run_test_02_create_service.sh` script. + +When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for +the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration +rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, +you should see the plots with the monitored data for the device. By default, device R1-EMU is selected. + +### 2.4.5.3. L3VPN Service removal + +This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock +OSM instance. + +To run this step, execute the `./ecoc22/run_test_03_delete_service.sh` script, or delete the L3NM service from the WebUI. + +When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed. +Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the +Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again. + +### 2.4.5.4. Cleanup + +This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities for completeness. 
+ +To run this step, execute the `./ecoc22/run_test_04_cleanup.sh` script. + +When the script finishes, check the WebUI "Devices" tab, you should see that the devices have been removed. Besides, in +the "Services" tab you can see that the "admin" Context has no services given that that context has been removed. -- GitLab From 272fa3052f2fe792360f6b64d744fab83b754527 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Wed, 27 Jul 2022 18:23:23 +0200 Subject: [PATCH 07/91] Tutorial: - Minor formatting of OFC'22 demo page. - Formatting of ECOC'22 demo page. --- tutorial/2-2-ofc22.md | 4 ++-- tutorial/2-4-ecoc22.md | 10 +++++++--- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/tutorial/2-2-ofc22.md b/tutorial/2-2-ofc22.md index 5a0547d64..d70d72661 100644 --- a/tutorial/2-2-ofc22.md +++ b/tutorial/2-2-ofc22.md @@ -30,8 +30,8 @@ __Important__: The device drivers operating with real devices, e.g., OpenConfigD ## 2.2.3. Deployment and Dependencies To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN -controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured -the Python environment as described in +controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python +environment as described in [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md). Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal you open. diff --git a/tutorial/2-4-ecoc22.md b/tutorial/2-4-ecoc22.md index 0cf285faa..b28fdfd1c 100644 --- a/tutorial/2-4-ecoc22.md +++ b/tutorial/2-4-ecoc22.md @@ -13,6 +13,8 @@ folder has been defined. 
This functional test has only been tested with emulated devices; however, if you have access to real devices, you can modify the files `./ecoc22/tests/Objects.py` and `./ecoc22/tests/Credentials.py` to point to your devices, and map to your network topology. +Otherwise, you can modify the `./ecoc22/tests/descriptors_emulated.json` that is designed to be uploaded through the +WebUI instead of using the command line scripts. __Important__: The device drivers operating with real devices, e.g., OpenConfigDriver, P4Driver, and TransportApiDriver, have to be considered as experimental. The configuration and monitoring capabilities they support are @@ -22,8 +24,8 @@ __Important__: The device drivers operating with real devices, e.g., OpenConfigD ## 2.4.3. Deployment and Dependencies To run this functional test, it is assumed you have deployed a MicroK8s-based Kubernetes environment and a TeraFlowSDN -controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the -Python environment as described in +controller instance as described in the [Tutorial: Deployment Guide](./1-0-deployment.md), and you configured the Python +environment as described in [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md). Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal you open. @@ -62,7 +64,7 @@ expected results are: - The devices to be added into the Topology. - The devices to be pre-configured and initialized as ENABLED by the Automation component. - The monitoring for the device ports (named as endpoints in TeraFlowSDN) to be activated and data collection to -automatically start. + automatically start. - The links to be added to the topology. To run this step, you can do it from the WebUI by uploading the file `./ecoc22/tests/descriptors_emulated.json` that @@ -90,6 +92,7 @@ the optical layer and another for the packet layer. 
Besides, you can check the " rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, you should see the plots with the monitored data for the device. By default, device R1-EMU is selected. + ### 2.4.5.3. L3VPN Service removal This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock @@ -101,6 +104,7 @@ When the script finishes, check the WebUI "Services" tab. You should see that th Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again. + ### 2.4.5.4. Cleanup This last step performs a cleanup of the scenario removing all the TeraFlowSDN entities for completeness. -- GitLab From 6a4ae5fe795873777f331b36664b7631248232b4 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Wed, 27 Jul 2022 17:30:36 +0000 Subject: [PATCH 08/91] Multiple changes: Common scripts: - added script to dump logs of all pods/containers in a namespace ECOC'22 demo: - removed unneeded scripts and files - cleaned up run_test scripts - added run test and coverage script - added deploy_specs.sh - added scripts to generate JSON descriptors OFC'22 demo: - added deploy_specs.sh --- my_deploy.sh | 2 +- scripts/dump_logs.sh | 37 ++++++++ src/tests/ecoc22/README.md | 93 ------------------- src/tests/ecoc22/deploy_in_kubernetes.sh | 27 ------ src/tests/ecoc22/deploy_specs.sh | 17 ++++ src/tests/ecoc22/dump_logs.sh | 24 ----- src/tests/ecoc22/run_test_01_bootstrap.sh | 37 +------- .../ecoc22/run_test_02_create_service.sh | 27 +----- .../ecoc22/run_test_03_delete_service.sh | 27 +----- src/tests/ecoc22/run_test_04_cleanup.sh | 27 +----- src/tests/ecoc22/run_tests_and_coverage.sh | 43 +++++++++ src/tests/ecoc22/show_deploy.sh | 18 ---- src/tests/ecoc22/tests/BuildDescriptors.py | 35 +++++++ 
src/tests/ecoc22/tests/LoadDescriptors.py | 40 ++++++++ src/tests/ofc22/deploy_specs.sh | 17 ++++ 15 files changed, 194 insertions(+), 277 deletions(-) create mode 100755 scripts/dump_logs.sh delete mode 100644 src/tests/ecoc22/README.md delete mode 100755 src/tests/ecoc22/deploy_in_kubernetes.sh create mode 100644 src/tests/ecoc22/deploy_specs.sh delete mode 100755 src/tests/ecoc22/dump_logs.sh create mode 100755 src/tests/ecoc22/run_tests_and_coverage.sh delete mode 100755 src/tests/ecoc22/show_deploy.sh create mode 100644 src/tests/ecoc22/tests/BuildDescriptors.py create mode 100644 src/tests/ecoc22/tests/LoadDescriptors.py create mode 100644 src/tests/ofc22/deploy_specs.sh diff --git a/my_deploy.sh b/my_deploy.sh index 67a2e0558..274f0dfc4 100644 --- a/my_deploy.sh +++ b/my_deploy.sh @@ -1,7 +1,7 @@ # Set the URL of your local Docker registry where the images will be uploaded to. export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" -# Set the list of components, separated by comas, you want to build images for, and deploy. +# Set the list of components, separated by spaces, you want to build images for, and deploy. # Supported components are: # context device automation policy service compute monitoring webui # interdomain slice pathcomp dlt diff --git a/scripts/dump_logs.sh b/scripts/dump_logs.sh new file mode 100755 index 000000000..85b927108 --- /dev/null +++ b/scripts/dump_logs.sh @@ -0,0 +1,37 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# If not already set, set the name of the Kubernetes namespace to deploy to. +export TFS_K8S_NAMESPACE=${TFS_K8S_NAMESPACE:-"tfs-dev"} + +######################################################################################################################## +# Automated steps start here +######################################################################################################################## + +mkdir -p tmp/exec_logs/$TFS_K8S_NAMESPACE/ +rm tmp/exec_logs/$TFS_K8S_NAMESPACE/* + +PODS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE --no-headers --output=custom-columns=":metadata.name") +for POD in $PODS; do + CONTAINERS=$(kubectl get pods --namespace $TFS_K8S_NAMESPACE $POD -o jsonpath='{.spec.containers[*].name}') + for CONTAINER in $CONTAINERS; do + kubectl --namespace $TFS_K8S_NAMESPACE logs pod/${POD} --container ${CONTAINER} \ + > tmp/exec_logs/$TFS_K8S_NAMESPACE/$POD\_\_$CONTAINER.log + done +done diff --git a/src/tests/ecoc22/README.md b/src/tests/ecoc22/README.md deleted file mode 100644 index 0e23de037..000000000 --- a/src/tests/ecoc22/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# ECOC'22 Demo - Disjoint DC-2-DC L3VPN Service -This functional test reproduces the experimental assessment of "" presented at [ECOC'22](https://www.ecoc2022.org/). - -## Functional test folder -This functional test can be found in folder `./src/tests/ecoc22/`. A convenience alias `./ecoc22/` pointing to that folder has been defined. 
- -## Execute with real devices -This functional test has only been tested with emulated devices; however, if you have access to real devices, you can modify the files `./ecoc22/tests/Objects.py` and `./ofc22/tests/Credentials.py` to point to your devices, and map to your network topology. - -__Important:__ The OpenConfigDriver, the P4Driver, and the TrandportApiDriver have to be considered as experimental. The configuration and monitoring capabilities they support are limited or partially implemented. Use them with care. - -## Deployment -To run this functional test, it is assumed you have deployed a Kubernetes-based environment as described in [Wiki: Installing Kubernetes on your Linux machine](https://gitlab.com/teraflow-h2020/controller/-/wikis/Installing-Kubernetes-on-your-Linux-machine). - -After installing Kubernetes, you can run it to deploy the appropriate components. Feel free to adapt it your particular case following the instructions described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance). - -__Important:__ -- The `./ecoc22/deploy_in_kubernetes.sh` assumes you have installed the appropriate development dependencies using the `install_development_dependencies.sh` script. -- Before running the scripts in this folder, remember to update the environment variable K8S_HOSTNAME to point to the Kubernetes node you will be using as described in [Wiki: Deploying a TeraFlow OS test instance](https://gitlab.com/teraflow-h2020/controller/-/wikis/Deploying-a-TeraFlow-OS-test-instance). - -For your convenience, the configuration script `./ecoc22/deploy_in_kubernetes.sh` has been already defined. The script will take some minutes to download the dependencies, build the micro-services, deploy them, and leave them ready for operation. The deployment will finish with a report of the items that have been created. 
- -## Access to the WebUI and Dashboard -When the deployment completes, you can connect to the TeraFlow OS WebUI and Dashboards as described in [Wiki: Using the WebUI](https://gitlab.com/teraflow-h2020/controller/-/wikis/Using-the-WebUI), or directly navigating to `http://[your-node-ip]:30800` for the WebUI and `http://[your-node-ip]:30300` for the Grafana Dashboard. - -Notes: -- the default credentials for the Grafana Dashboiard is user/pass: `admin`/`admin123+`. -- this functional test does not involve the Monitoring component, so no monitoring data is plotted in Grafana. - -## Test execution -To execute this functional test, four main steps needs to be carried out: -1. Device bootstrapping -2. L3VPN Service creation -3. L3VPN Service removal -4. Cleanup - -Upon the execution of each test progresses, a report will be generated indicating PASSED / FAILED / SKIPPED. If there is some error during the execution, you should see a detailed report on the error. See the troubleshooting section in that case. - -Feel free to check the logs of the different components using the appropriate `ecoc22/show_logs_[component].sh` scripts after you execute each step. - -### 1. Device bootstrapping - -This step configures some basic entities (Context and Topology), the devices, and the links in the topology. The expected results are: -- The devices to be incorporated into the Topology. -- The devices to be pre-configured and initialized as ENABLED by the Automation component. -- The monitoring for the device ports (named as endpoints in TeraFlow OS) to be activated and data collection to automatically start. -- The links to be added to the topology. - -To run this step, execute the following script: -`./ofc22/run_test_01_bootstrap.sh` - -When the script finishes, check in the Grafana L3-Monitoring Dashboard and you should see the monitoring data being plotted and updated every 5 seconds (by default). Given that there is no service configured, you should see a 0-valued flat plot. 
- -In the WebUI, select the "admin" Context. In the "Devices" tab you should see that 5 different emulated devices have been created and activated: 4 packet routers, and 1 optical line system controller. Besides, in the "Services" tab you should see that there is no service created. Note here that the emulated devices produce synthetic randomly-generated data and do not care about the services configured. - -### 2. L3VPN Service creation - -This step configures a new service emulating the request an OSM WIM would make by means of a Mock OSM instance. - -To run this step, execute the following script: -`./ofc22/run_test_02_create_service.sh` - -When the script finishes, check the WebUI "Services" tab. You should see that two services have been created, one for the optical layer and another for the packet layer. Besides, you can check the "Devices" tab to see the configuration rules that have been configured in each device. In the Grafana Dashboard, given that there is now a service configured, you should see the plots with the monitored data for the device. By default, device R1-INF is selected. - -### 3. L3VPN Service removal - -This step deconfigures the previously created services emulating the request an OSM WIM would make by means of a Mock OSM instance. - -To run this step, execute the following script: -`./ofc22/run_test_03_delete_service.sh` - -When the script finishes, check the WebUI "Services" tab. You should see that the two services have been removed. Besides, in the "Devices" tab you can see that the appropriate configuration rules have been deconfigured. In the Grafana Dashboard, given that there is no service configured, you should see a 0-valued flat plot again. - -### 4. Cleanup - -This last step just performs a cleanup of the scenario removing all the TeraFlow OS entities for completeness. 
- -To run this step, execute the following script: -`./ofc22/run_test_04_cleanup.sh` - -When the script finishes, check the WebUI "Devices" tab, you should see that the devices have been removed. Besides, in the "Services" tab you can see that the "admin" Context has no services given that that context has been removed. - -## Troubleshooting - -Different scripts are provided to help in troubleshooting issues in the execution of the test. These scripts are: -- `./ofc22/show_deployment.sh`: this script reports the items belonging to this deployment. Use it to validate that all the pods, deployments and replica sets are ready and have a state of "running"; and the services are deployed and have appropriate IP addresses and ports. -- `ofc22/show_logs_automation.sh`: this script reports the logs for the automation component. -- `ofc22/show_logs_compute.sh`: this script reports the logs for the compute component. -- `ofc22/show_logs_context.sh`: this script reports the logs for the context component. -- `ofc22/show_logs_device.sh`: this script reports the logs for the device component. -- `ofc22/show_logs_monitoring.sh`: this script reports the logs for the monitoring component. -- `ofc22/show_logs_service.sh`: this script reports the logs for the service component. -- `ofc22/show_logs_webui.sh`: this script reports the logs for the webui component. diff --git a/src/tests/ecoc22/deploy_in_kubernetes.sh b/src/tests/ecoc22/deploy_in_kubernetes.sh deleted file mode 100755 index 8cd32ff19..000000000 --- a/src/tests/ecoc22/deploy_in_kubernetes.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# ECOC 22 deployment settings - -export REGISTRY_IMAGE="" -export COMPONENTS="context device service slice compute webui" -export IMAGE_TAG="ecoc22" -export K8S_NAMESPACE="ecoc22" -export K8S_HOSTNAME="kubernetes-master" -export EXTRA_MANIFESTS="./ecoc22/expose_services.yaml" -export GRAFANA_PASSWORD="admin123+" - -./deploy_in_kubernetes.sh diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh new file mode 100644 index 000000000..4afe7e20f --- /dev/null +++ b/src/tests/ecoc22/deploy_specs.sh @@ -0,0 +1,17 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device automation service slice compute webui" + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy to. 
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
diff --git a/src/tests/ecoc22/dump_logs.sh b/src/tests/ecoc22/dump_logs.sh
deleted file mode 100755
index 85372f85a..000000000
--- a/src/tests/ecoc22/dump_logs.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/bash
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-export COMPONENTS="context device service slice compute webui"
-export K8S_NAMESPACE="ecoc22"
-
-mkdir -p tmp/exec_logs/$K8S_NAMESPACE/
-rm tmp/exec_logs/$K8S_NAMESPACE/*
-
-for COMPONENT in $COMPONENTS; do
-    kubectl --namespace $K8S_NAMESPACE logs deployment/${COMPONENT}service -c server > tmp/exec_logs/$K8S_NAMESPACE/$COMPONENT.log
-done
diff --git a/src/tests/ecoc22/run_test_01_bootstrap.sh b/src/tests/ecoc22/run_test_01_bootstrap.sh
index f4d3b9ba3..4e94fcb2c 100755
--- a/src/tests/ecoc22/run_test_01_bootstrap.sh
+++ b/src/tests/ecoc22/run_test_01_bootstrap.sh
@@ -13,39 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
- -PROJECTDIR=`pwd` - -cd $PROJECTDIR/src -RCFILE=$PROJECTDIR/coverage/.coveragerc -COVERAGEFILE=$PROJECTDIR/coverage/.coverage - -# Configure the correct folder on the .coveragerc file -cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE - -# Destroy old coverage file -rm -f $COVERAGEFILE - -# Set the name of the Kubernetes namespace and hostname to use. -K8S_NAMESPACE="ecoc22" -# K8S_HOSTNAME="kubernetes-master" -# dynamically gets the name of the K8s master node -K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` - -# Flush Context database -kubectl --namespace $K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL - -export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') -export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') -export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') - -# Useful flags for pytest: -#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG - -# Run functional test and analyze coverage of code at same time - -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_bootstrap.py +pytest --verbose 
src/tests/ecoc22/tests/test_functional_bootstrap.py diff --git a/src/tests/ecoc22/run_test_02_create_service.sh b/src/tests/ecoc22/run_test_02_create_service.sh index f426e8cd8..9cab7daa2 100755 --- a/src/tests/ecoc22/run_test_02_create_service.sh +++ b/src/tests/ecoc22/run_test_02_create_service.sh @@ -13,29 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. - -PROJECTDIR=`pwd` - -cd $PROJECTDIR/src -RCFILE=$PROJECTDIR/coverage/.coveragerc -COVERAGEFILE=$PROJECTDIR/coverage/.coverage - -# Set the name of the Kubernetes namespace and hostname to use. -K8S_NAMESPACE="ecoc22" -# dynamically gets the name of the K8s master node -K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` - -export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') -export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') -export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') - -# Useful flags for pytest: -#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG - -# Run functional test and analyze coverage of code at same time - -coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose -o log_cli=true \ - 
tests/ecoc22/tests/test_functional_create_service.py +pytest --verbose src/tests/ecoc22/tests/test_functional_create_service.py diff --git a/src/tests/ecoc22/run_test_03_delete_service.sh b/src/tests/ecoc22/run_test_03_delete_service.sh index a589ddf68..b01ba350a 100755 --- a/src/tests/ecoc22/run_test_03_delete_service.sh +++ b/src/tests/ecoc22/run_test_03_delete_service.sh @@ -13,29 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. - -PROJECTDIR=`pwd` - -cd $PROJECTDIR/src -RCFILE=$PROJECTDIR/coverage/.coveragerc -COVERAGEFILE=$PROJECTDIR/coverage/.coverage - -# Set the name of the Kubernetes namespace and hostname to use. -K8S_NAMESPACE="ecoc22" -# dynamically gets the name of the K8s master node -K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` - -export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') -export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') -export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') - -# Useful flags for pytest: -#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG - -# Run functional test and analyze coverage of code at same time - -coverage run --rcfile=$RCFILE 
--append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_delete_service.py +pytest --verbose src/tests/ecoc22/tests/test_functional_delete_service.py diff --git a/src/tests/ecoc22/run_test_04_cleanup.sh b/src/tests/ecoc22/run_test_04_cleanup.sh index 0b8b30519..a2925d633 100755 --- a/src/tests/ecoc22/run_test_04_cleanup.sh +++ b/src/tests/ecoc22/run_test_04_cleanup.sh @@ -13,29 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. - -PROJECTDIR=`pwd` - -cd $PROJECTDIR/src -RCFILE=$PROJECTDIR/coverage/.coveragerc -COVERAGEFILE=$PROJECTDIR/coverage/.coverage - -# Set the name of the Kubernetes namespace and hostname to use. -K8S_NAMESPACE="ecoc22" -# dynamically gets the name of the K8s master node -K8S_HOSTNAME=`kubectl get nodes --selector=node-role.kubernetes.io/master | tr -s " " | cut -f1 -d" " | sed -n '2 p'` - -export CONTEXTSERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export CONTEXTSERVICE_SERVICE_PORT_GRPC=$(kubectl get service contextservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==1010)].nodePort}') -export DEVICESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export DEVICESERVICE_SERVICE_PORT_GRPC=$(kubectl get service deviceservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==2020)].nodePort}') -export COMPUTESERVICE_SERVICE_HOST=$(kubectl get node $K8S_HOSTNAME -o 'jsonpath={.status.addresses[?(@.type=="InternalIP")].address}') -export COMPUTESERVICE_SERVICE_PORT_HTTP=$(kubectl get service computeservice-public --namespace $K8S_NAMESPACE -o 'jsonpath={.spec.ports[?(@.port==8080)].nodePort}') - -# Useful flags for pytest: -#-o log_cli=true -o log_file=device.log -o log_file_level=DEBUG - -# Run functional test and analyze coverage of code at same time - -coverage run 
--rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - tests/ecoc22/tests/test_functional_cleanup.py +pytest --verbose src/tests/ecoc22/tests/test_functional_cleanup.py diff --git a/src/tests/ecoc22/run_tests_and_coverage.sh b/src/tests/ecoc22/run_tests_and_coverage.sh new file mode 100755 index 000000000..835867896 --- /dev/null +++ b/src/tests/ecoc22/run_tests_and_coverage.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc +COVERAGEFILE=$PROJECTDIR/coverage/.coverage + +# Configure the correct folder on the .coveragerc file +cat $PROJECTDIR/coverage/.coveragerc.template | sed s+~/teraflow/controller+$PROJECTDIR+g > $RCFILE + +# Destroy old coverage file +rm -f $COVERAGEFILE + +# Force a flush of Context database +kubectl --namespace $TFS_K8S_NAMESPACE exec -it deployment/contextservice --container redis -- redis-cli FLUSHALL + +# Run functional tests and analyze code coverage at the same time +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + tests/ecoc22/tests/test_functional_bootstrap.py + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + tests/ecoc22/tests/test_functional_create_service.py + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + tests/ecoc22/tests/test_functional_delete_service.py + +coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + tests/ecoc22/tests/test_functional_cleanup.py diff --git a/src/tests/ecoc22/show_deploy.sh b/src/tests/ecoc22/show_deploy.sh deleted file mode 100755 index 3e1b283a9..000000000 --- a/src/tests/ecoc22/show_deploy.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -K8S_NAMESPACE="ecoc22" -kubectl --namespace $K8S_NAMESPACE get all diff --git a/src/tests/ecoc22/tests/BuildDescriptors.py b/src/tests/ecoc22/tests/BuildDescriptors.py new file mode 100644 index 000000000..00d09ae8d --- /dev/null +++ b/src/tests/ecoc22/tests/BuildDescriptors.py @@ -0,0 +1,35 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, json, sys +from .Objects import CONTEXTS, DEVICES, LINKS, TOPOLOGIES + +def main(): + with open('tests/ecoc22/descriptors_emulated.json', 'w', encoding='UTF-8') as f: + devices = [] + for device,connect_rules in DEVICES: + device = copy.deepcopy(device) + device['device_config']['config_rules'].extend(connect_rules) + devices.append(device) + + f.write(json.dumps({ + 'contexts': CONTEXTS, + 'topologies': TOPOLOGIES, + 'devices': devices, + 'links': LINKS + })) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/ecoc22/tests/LoadDescriptors.py b/src/tests/ecoc22/tests/LoadDescriptors.py new file mode 100644 index 000000000..495b05a89 --- /dev/null +++ b/src/tests/ecoc22/tests/LoadDescriptors.py @@ -0,0 +1,40 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, sys +from common.Settings import get_setting +from context.client.ContextClient import ContextClient +from context.proto.context_pb2 import Context, Device, Link, Topology +from device.client.DeviceClient import DeviceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def main(): + context_client = ContextClient( + get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + device_client = DeviceClient( + get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + + with open('tests/ecoc22/descriptors.json', 'r', encoding='UTF-8') as f: + descriptors = json.loads(f.read()) + + for context in descriptors['contexts' ]: context_client.SetContext (Context (**context )) + for topology in descriptors['topologies']: context_client.SetTopology(Topology(**topology)) + for device in descriptors['devices' ]: device_client .AddDevice (Device (**device )) + for link in descriptors['links' ]: context_client.SetLink (Link (**link )) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/src/tests/ofc22/deploy_specs.sh b/src/tests/ofc22/deploy_specs.sh new file mode 100644 index 000000000..b486474e2 --- /dev/null +++ b/src/tests/ofc22/deploy_specs.sh @@ -0,0 +1,17 @@ +# Set the URL of your local Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. 
+export TFS_COMPONENTS="context device automation service compute monitoring webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
-- 
GitLab

From c9f05d80fdb61b80acb37c828f08048790d11bb9 Mon Sep 17 00:00:00 2001
From: PabloArmingolRobles
Date: Fri, 29 Jul 2022 12:16:26 +0200
Subject: [PATCH 09/91] Commit per rule in SetConfig

---
 .../drivers/openconfig/OpenConfigDriver.py    | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py
index 8461e83e4..c35ae9b9d 100644
--- a/src/device/service/drivers/openconfig/OpenConfigDriver.py
+++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py
@@ -92,7 +92,7 @@ class NetconfSessionHandler:
     def use_candidate(self): return self.__candidate_supported and not self.__force_running
 
     @property
-    def commit_per_delete_rule(self): return self.__commit_per_delete
+    def commit_per_rule(self): return self.__commit_per_delete
 
     @RETRY_DECORATOR
     def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin
@@ -284,12 +284,15 @@ class OpenConfigDriver(_Driver):
         with self.__lock:
             if self.__netconf_handler.use_candidate:
                 with self.__netconf_handler.locked(target='candidate'):
-                    results = edit_config(self.__netconf_handler, resources, target='candidate')
-                    try:
-                        self.__netconf_handler.commit()
-                    except Exception as e: # pylint: disable=broad-except
-                        LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources)))
-                        results = [e for _ in resources] # if commit fails, set exception in each resource
+                    if 
self.__netconf_handler.commit_per_rule: + results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule= True) + else: + results = edit_config(self.__netconf_handler, resources, target='candidate') + try: + self.__netconf_handler.commit() + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources))) + results = [e for _ in resources] # if commit fails, set exception in each resource else: results = edit_config(self.__netconf_handler, resources) return results @@ -300,7 +303,7 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - if self.__netconf_handler.commit_per_delete_rule: + if self.__netconf_handler.commit_per_rule: results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True) else: results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) -- GitLab From 6dadeffac6a88ad0ddc12eac4f0d2b19015489d5 Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Fri, 29 Jul 2022 12:19:58 +0200 Subject: [PATCH 10/91] Changes in ACL --- .../acl/acl-set/acl-entry/edit_config.xml | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml index 297563cca..2769e8b2e 100644 --- a/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/acl/acl-set/acl-entry/edit_config.xml @@ -13,7 +13,16 @@ {{sequence_id}} - {% if config is defined %} + {% if operation is not defined or operation != 'delete' %} + {% if type=='ACL_L2' %} + + + {% if source_address is defined %}{{source_address}}{% endif%} + {% if 
destination_address is defined %}{{destination_address}}{% endif%} + + + {% endif%} + {% if type=='ACL_IPV4' %} {% if source_address is defined %}{{source_address}}{% endif%} @@ -30,6 +39,19 @@ {% if tcp_flags is defined %}{{tcp_flags}}{% endif%} + {% endif%} + {% if type=='ACL_IPV6' %} + + + {% if source_address is defined %}{{source_address}}{% endif%} + {% if destination_address is defined %}{{destination_address}}{% endif%} + {% if protocol is defined %}{{protocol}}{% endif%} + {% if dscp is defined %}{{dscp}}{% endif%} + {% if hop_limit is defined %}{{hop_limit}}{% endif%} + + + {% endif%} + {% if forwarding_action is defined %}{{forwarding_action}}{% endif%} -- GitLab From 1475e174f722788c7d2a0250474371880736c3de Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Fri, 29 Jul 2022 12:33:29 +0200 Subject: [PATCH 11/91] Device component OCDriver : - Fixed L3VPN management for ADVA --- .../openconfig/templates/Interfaces.py | 3 +- .../openconfig/templates/NetworkInstances.py | 9 ++- .../openconfig/templates/RoutingPolicy.py | 2 +- .../templates/interface/edit_config.xml | 8 +- .../interface/subinterface/edit_config.xml | 76 ++++++++++--------- .../network_instance/edit_config.xml | 3 +- .../interface/edit_config.xml | 4 +- .../protocols/edit_config.xml | 19 ++++- .../bgp_defined_set/edit_config.xml | 5 +- .../statement/edit_config.xml | 17 +++-- 10 files changed, 88 insertions(+), 58 deletions(-) diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py index 33f977524..da1bbd483 100644 --- a/src/device/service/drivers/openconfig/templates/Interfaces.py +++ b/src/device/service/drivers/openconfig/templates/Interfaces.py @@ -49,12 +49,13 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: subinterface = {} add_value_from_tag(subinterface, 'name', interface_name) + add_value_from_tag(subinterface, 'mtu', interface_mtu) subinterface_index = 
xml_subinterface.find('oci:index', namespaces=NAMESPACES) if subinterface_index is None or subinterface_index.text is None: continue add_value_from_tag(subinterface, 'index', subinterface_index, cast=int) - vlan_id = xml_subinterface.find('ocv:vlan/ocv:config/ocv:vlan-id', namespaces=NAMESPACES) + vlan_id = xml_subinterface.find('ocv:vlan/ocv:match/ocv:single-tagged/ocv:config/ocv:vlan-id', namespaces=NAMESPACES) add_value_from_tag(subinterface, 'vlan_id', vlan_id, cast=int) # TODO: implement support for multiple IP addresses per subinterface diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py index b091a0d20..0cd6bbfd3 100644 --- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py +++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py @@ -39,10 +39,11 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: add_value_from_tag(network_instance, 'name', ni_name) ni_type = xml_network_instance.find('ocni:config/ocni:type', namespaces=NAMESPACES) + ni_type.text = ni_type.text.replace('oc-ni-types:','') add_value_from_tag(network_instance, 'type', ni_type) - #ni_router_id = xml_network_instance.find('ocni:config/ocni:router-id', namespaces=NAMESPACES) - #add_value_from_tag(network_instance, 'router_id', ni_router_id) + ni_router_id = xml_network_instance.find('ocni:config/ocni:router-id', namespaces=NAMESPACES) + add_value_from_tag(network_instance, 'router_id', ni_router_id) ni_route_dist = xml_network_instance.find('ocni:config/ocni:route-distinguisher', namespaces=NAMESPACES) add_value_from_tag(network_instance, 'route_distinguisher', ni_route_dist) @@ -71,6 +72,8 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: if protocol['identifier'] == 'BGP': bgp_as = xml_protocol.find('ocni:bgp/ocni:global/ocni:config/ocni:as', namespaces=NAMESPACES) add_value_from_tag(protocol, 'as', bgp_as, cast=int) + 
bgp_id = xml_protocol.find('ocni:bgp/ocni:global/ocni:config/ocni:router-id', namespaces=NAMESPACES) + add_value_from_tag(protocol, 'router_id', bgp_id) resource_key = '/network_instance[{:s}]/protocols[{:s}]'.format( network_instance['name'], protocol['identifier']) @@ -94,7 +97,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: add_value_from_tag(table_connection, 'address_family', address_family, cast=lambda s: s.replace('oc-types:', '')) - default_import_policy = xml_table_connection.find('ocni:default-import-policy', namespaces=NAMESPACES) + default_import_policy = xml_table_connection.find('ocni:config/ocni:default-import-policy', namespaces=NAMESPACES) add_value_from_tag(table_connection, 'default_import_policy', default_import_policy) resource_key = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format( diff --git a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py index 369732de3..068ca5430 100644 --- a/src/device/service/drivers/openconfig/templates/RoutingPolicy.py +++ b/src/device/service/drivers/openconfig/templates/RoutingPolicy.py @@ -74,7 +74,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: resource_key = '/routing_policy/bgp_defined_set[{:s}]'.format(bgp_ext_community_set['ext_community_set_name']) response.append((resource_key, copy.deepcopy(bgp_ext_community_set))) - ext_community_member = xml_bgp_ext_community_set.find('ocbp:ext-community-member', namespaces=NAMESPACES) + ext_community_member = xml_bgp_ext_community_set.find('ocbp:config/ocbp:ext-community-member', namespaces=NAMESPACES) if ext_community_member is not None and ext_community_member.text is not None: add_value_from_tag(bgp_ext_community_set, 'ext_community_member', ext_community_member) diff --git a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml 
b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml index ff15d1d68..4bc53ff1d 100644 --- a/src/device/service/drivers/openconfig/templates/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/edit_config.xml @@ -1,14 +1,12 @@ - + {{name}} + {% if operation is defined and operation != 'delete' %} {{name}} - {% if operation is defined and operation == 'delete' %} - {% else %} - {{description}} {{mtu}} - {% endif %} + {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index d266f819c..f172c1676 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -1,37 +1,45 @@ - - - {{name}} - {% if operation is not defined or operation != 'delete' %} + + + {{name}} + + {{name}} + ianaift:l3ipvlan + {{mtu}} + true + + + + {{index}} - {{name}} + {{index}} + {{description}} - {% endif %} - - - {{index}} - {% if operation is not defined or operation != 'delete' %} - - {{index}} - true - - - - {{vlan_id}} - - - - -
- {{address_ip}} - - {{address_ip}} - {{address_prefix}} - -
-
-
- {% endif %} -
-
-
+ + + + + {{vlan_id}} + + + + + + + + {{address_ip}} + + {{address_ip}} + {{address_prefix}} + + + + + + + +
+ + + diff --git a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml index 9362c09c6..74424cea9 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml @@ -5,7 +5,7 @@ {{name}} oc-ni-types:{{type}} - {{description}} + {% if description is defined %}{{description}}{% endif %} {% if router_id is defined %}{{router_id}}{% endif %} {{route_distinguisher}} true @@ -13,6 +13,7 @@ oc-ni-types:MPLS + oc-ni-types:INSTANCE_LABEL {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml index d5c33d31a..bf8c0c077 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/interface/edit_config.xml @@ -2,15 +2,13 @@ {{name}} - + {{id}} - {% if operation is not defined or operation != 'delete' %} {{id}} {{interface}} {{subinterface}} - {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml index da05d0467..c9c068e48 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/protocols/edit_config.xml @@ -3,19 +3,19 @@ {{name}} - {{identifier}} + oc-pol-types:{{identifier}} {{protocol_name}} {% if operation is not defined or operation != 'delete' %} - {{identifier}} + oc-pol-types:{{identifier}} {{protocol_name}} - true {% if identifier=='BGP' %} {{as}} + {{router_id}} @@ -23,5 +23,18 @@ {% endif %} + {% if operation is 
not defined or operation != 'delete' %} + + + + oc-pol-types:{{identifier}} + oc-types:IPV4 + + oc-pol-types:{{identifier}} + oc-types:IPV4 + +
+
+ {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml index df64606ae..6843c2dcb 100644 --- a/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/routing_policy/bgp_defined_set/edit_config.xml @@ -5,7 +5,10 @@ {{ext_community_set_name}} {% if operation is not defined or operation != 'delete' %} - {% if ext_community_member is defined %} {{ext_community_member}}{% endif %} + + {{ext_community_set_name}} + {{ext_community_member}} + {% endif %} diff --git a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml index 711067f42..eda2d99c9 100644 --- a/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/routing_policy/policy_definition/statement/edit_config.xml @@ -1,8 +1,11 @@ -{% if operation is not defined or operation != 'delete' %} - + {{policy_name}} + {% if operation is not defined or operation != 'delete' %} + + {{policy_name}} + {{statement_name}} @@ -10,11 +13,13 @@ {{statement_name}}
+ + oc-pol-types:DIRECTLY_CONNECTED + - + {{ext_community_set_name}} - {{match_set_options}} - +
@@ -24,7 +29,7 @@ + {% endif %} -{% endif %} -- GitLab From 6f91d90b135979bb32904312ede46a789bd5db8c Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Fri, 29 Jul 2022 13:05:44 +0200 Subject: [PATCH 12/91] Device component - OpenConfigDriver: - updated to new protobuf definitions for config rules --- src/device/tests/test_unitary_openconfig.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/device/tests/test_unitary_openconfig.py b/src/device/tests/test_unitary_openconfig.py index 47bea7ef6..32fb5709a 100644 --- a/src/device/tests/test_unitary_openconfig.py +++ b/src/device/tests/test_unitary_openconfig.py @@ -88,8 +88,9 @@ def test_device_openconfig_add_correct( device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID)) config_rules = [ - (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value) + (ConfigActionEnum.Name(config_rule.action), config_rule.custom.resource_key, config_rule.custom.resource_value) for config_rule in device_data.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' ] LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format( '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules]))) @@ -135,8 +136,9 @@ def test_device_openconfig_configure( device_data = context_client.GetDevice(DeviceId(**DEVICE_OC_ID)) config_rules = [ - (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value) + (ConfigActionEnum.Name(config_rule.action), config_rule.custom.resource_key, config_rule.custom.resource_value) for config_rule in device_data.device_config.config_rules + if config_rule.WhichOneof('config_rule') == 'custom' ] LOGGER.info('device_data.device_config.config_rules = \n{:s}'.format( '\n'.join(['{:s} {:s} = {:s}'.format(*config_rule) for config_rule in config_rules]))) @@ -276,7 +278,7 @@ def test_device_openconfig_deconfigure( device_data = 
context_client.GetDevice(DeviceId(**DEVICE_OC_ID)) config_rules = [ - (ConfigActionEnum.Name(config_rule.action), config_rule.resource_key, config_rule.resource_value) + (ConfigActionEnum.Name(config_rule.action), config_rule.custom.resource_key, config_rule.custom.resource_value) for config_rule in device_data.device_config.config_rules if config_rule.WhichOneof('config_rule') == 'custom' ] -- GitLab From d4afb901526569afc0f2edc4aba9e6e6a6dd55dd Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Fri, 29 Jul 2022 13:15:40 +0200 Subject: [PATCH 13/91] Device OC : configuration of unitary tests for ADVA --- src/device/tests/test_unitary_openconfig.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/device/tests/test_unitary_openconfig.py b/src/device/tests/test_unitary_openconfig.py index 32fb5709a..38adcf15f 100644 --- a/src/device/tests/test_unitary_openconfig.py +++ b/src/device/tests/test_unitary_openconfig.py @@ -29,8 +29,12 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import mock_service, device_service, context_client, device_client, monitoring_client, test_prepare_environment) try: - from .Device_OpenConfig_Infinera1 import( + #from .Device_OpenConfig_Infinera1 import( #from .Device_OpenConfig_Infinera2 import( + #from .Device_OpenConfig_Adva import( + #from .Device_OpenConfig_Adva_149 import( + from .Device_OpenConfig_Adva_155 import( + DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID, DEVICE_OC_UUID) ENABLE_OPENCONFIG = True @@ -38,10 +42,9 @@ except ImportError: ENABLE_OPENCONFIG = False ENABLE_OPENCONFIG_CONFIGURE = True -ENABLE_OPENCONFIG_MONITOR = True +ENABLE_OPENCONFIG_MONITOR = False ENABLE_OPENCONFIG_DECONFIGURE = True - logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) logging.getLogger('monitoring-client').setLevel(logging.WARNING) -- GitLab From 
23b42b082836931815f2b94735390fca32a4fe95 Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Fri, 29 Jul 2022 13:16:58 +0200 Subject: [PATCH 14/91] Device OC : configuration of unitary tests for ADVA --- src/device/tests/test_unitary_openconfig.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/device/tests/test_unitary_openconfig.py b/src/device/tests/test_unitary_openconfig.py index 38adcf15f..6144a95d9 100644 --- a/src/device/tests/test_unitary_openconfig.py +++ b/src/device/tests/test_unitary_openconfig.py @@ -34,7 +34,7 @@ try: #from .Device_OpenConfig_Adva import( #from .Device_OpenConfig_Adva_149 import( from .Device_OpenConfig_Adva_155 import( - + #from .Device_OpenConfig_Cisco import( DEVICE_OC, DEVICE_OC_CONFIG_RULES, DEVICE_OC_DECONFIG_RULES, DEVICE_OC_CONNECT_RULES, DEVICE_OC_ID, DEVICE_OC_UUID) ENABLE_OPENCONFIG = True -- GitLab From fc145563d595e29578089e5cbdefc3c441e48b69 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Mon, 1 Aug 2022 18:15:00 +0000 Subject: [PATCH 15/91] ECOC'22 functional test: - arranged run_test scripts and objects - deactivated unneeded components in the deployment - arranged LoadDescriptors Python script - improved Tutorial page for ECOC'22 demo --- src/tests/ecoc22/deploy_specs.sh | 2 +- src/tests/ecoc22/redeploy.sh | 4 ++++ src/tests/ecoc22/run_test_01_bootstrap.sh | 1 + src/tests/ecoc22/run_test_02_create_service.sh | 1 + src/tests/ecoc22/run_test_03_delete_service.sh | 1 + src/tests/ecoc22/run_test_04_cleanup.sh | 1 + src/tests/ecoc22/tests/LoadDescriptors.py | 8 +++----- src/tests/ecoc22/tests/Objects.py | 2 +- src/tests/ecoc22/tests/test_functional_bootstrap.py | 6 +++--- src/tests/ecoc22/tests/test_functional_create_service.py | 2 +- src/tests/ecoc22/tests/test_functional_delete_service.py | 2 +- tutorial/2-4-ecoc22.md | 5 +++++ 12 files changed, 23 insertions(+), 12 deletions(-) create mode 100755 src/tests/ecoc22/redeploy.sh diff --git a/src/tests/ecoc22/deploy_specs.sh 
b/src/tests/ecoc22/deploy_specs.sh index 4afe7e20f..dc75c4b2c 100644 --- a/src/tests/ecoc22/deploy_specs.sh +++ b/src/tests/ecoc22/deploy_specs.sh @@ -2,7 +2,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device automation service slice compute webui" +export TFS_COMPONENTS="context device service slice compute" # automation webui # Set the tag you want to use for your images. export TFS_IMAGE_TAG="dev" diff --git a/src/tests/ecoc22/redeploy.sh b/src/tests/ecoc22/redeploy.sh new file mode 100755 index 000000000..3f3986deb --- /dev/null +++ b/src/tests/ecoc22/redeploy.sh @@ -0,0 +1,4 @@ +#!/bin/bash +source ecoc22/deploy_specs.sh +./deploy.sh +source tfs_runtime_env_vars.sh diff --git a/src/tests/ecoc22/run_test_01_bootstrap.sh b/src/tests/ecoc22/run_test_01_bootstrap.sh index 4e94fcb2c..819991d78 100755 --- a/src/tests/ecoc22/run_test_01_bootstrap.sh +++ b/src/tests/ecoc22/run_test_01_bootstrap.sh @@ -13,4 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. +source tfs_runtime_env_vars.sh pytest --verbose src/tests/ecoc22/tests/test_functional_bootstrap.py diff --git a/src/tests/ecoc22/run_test_02_create_service.sh b/src/tests/ecoc22/run_test_02_create_service.sh index 9cab7daa2..5a54d39d4 100755 --- a/src/tests/ecoc22/run_test_02_create_service.sh +++ b/src/tests/ecoc22/run_test_02_create_service.sh @@ -13,4 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+source tfs_runtime_env_vars.sh pytest --verbose src/tests/ecoc22/tests/test_functional_create_service.py diff --git a/src/tests/ecoc22/run_test_03_delete_service.sh b/src/tests/ecoc22/run_test_03_delete_service.sh index b01ba350a..900e09b65 100755 --- a/src/tests/ecoc22/run_test_03_delete_service.sh +++ b/src/tests/ecoc22/run_test_03_delete_service.sh @@ -13,4 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. +source tfs_runtime_env_vars.sh pytest --verbose src/tests/ecoc22/tests/test_functional_delete_service.py diff --git a/src/tests/ecoc22/run_test_04_cleanup.sh b/src/tests/ecoc22/run_test_04_cleanup.sh index a2925d633..4e0622e6b 100755 --- a/src/tests/ecoc22/run_test_04_cleanup.sh +++ b/src/tests/ecoc22/run_test_04_cleanup.sh @@ -13,4 +13,5 @@ # See the License for the specific language governing permissions and # limitations under the License. +source tfs_runtime_env_vars.sh pytest --verbose src/tests/ecoc22/tests/test_functional_cleanup.py diff --git a/src/tests/ecoc22/tests/LoadDescriptors.py b/src/tests/ecoc22/tests/LoadDescriptors.py index 495b05a89..bd7e48366 100644 --- a/src/tests/ecoc22/tests/LoadDescriptors.py +++ b/src/tests/ecoc22/tests/LoadDescriptors.py @@ -15,17 +15,15 @@ import json, logging, sys from common.Settings import get_setting from context.client.ContextClient import ContextClient -from context.proto.context_pb2 import Context, Device, Link, Topology +from common.proto.context_pb2 import Context, Device, Link, Topology from device.client.DeviceClient import DeviceClient LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) def main(): - context_client = ContextClient( - get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) - device_client = DeviceClient( - get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + context_client = ContextClient() + device_client = DeviceClient() with 
open('tests/ecoc22/descriptors.json', 'r', encoding='UTF-8') as f: descriptors = json.loads(f.read()) diff --git a/src/tests/ecoc22/tests/Objects.py b/src/tests/ecoc22/tests/Objects.py index 062a00516..33a6ad052 100644 --- a/src/tests/ecoc22/tests/Objects.py +++ b/src/tests/ecoc22/tests/Objects.py @@ -258,7 +258,7 @@ WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', ENDPOINT_ID_CE4_1_1, DEVICE_PE WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC] WIM_SRV_VLAN_ID = 300 -WIM_SERVICE_TYPE = 'ELINE' +WIM_SERVICE_TYPE = 'ELAN' WIM_SERVICE_CONNECTION_POINTS = [ {'service_endpoint_id': WIM_SEP_DC1_PRI, 'service_endpoint_encapsulation_type': 'dot1q', diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py index 7626ce304..a787cf049 100644 --- a/src/tests/ecoc22/tests/test_functional_bootstrap.py +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -14,8 +14,8 @@ import copy, logging, pytest from common.Settings import get_setting +from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology from context.client.ContextClient import ContextClient -from context.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology from device.client.DeviceClient import DeviceClient from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES @@ -26,14 +26,14 @@ LOGGER.setLevel(logging.DEBUG) @pytest.fixture(scope='session') def context_client(): - _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) + _client = ContextClient() yield _client _client.close() @pytest.fixture(scope='session') def device_client(): - _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) + _client = DeviceClient() yield _client _client.close() diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py 
b/src/tests/ecoc22/tests/test_functional_create_service.py index 58d87dada..2928d9b35 100644 --- a/src/tests/ecoc22/tests/test_functional_create_service.py +++ b/src/tests/ecoc22/tests/test_functional_create_service.py @@ -14,10 +14,10 @@ import logging, pytest from common.Settings import get_setting +from common.proto.context_pb2 import ContextId, Empty from common.tools.grpc.Tools import grpc_message_to_json_string from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient -from context.proto.context_pb2 import ContextId, Empty from .Objects import ( CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, WIM_PASSWORD, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE, WIM_USERNAME) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py index 51e91a596..89fe22b1f 100644 --- a/src/tests/ecoc22/tests/test_functional_delete_service.py +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -15,6 +15,7 @@ import logging, pytest from common.DeviceTypes import DeviceTypeEnum from common.Settings import get_setting +from common.proto.context_pb2 import ContextId, Empty from common.tests.EventTools import EVENT_REMOVE, EVENT_UPDATE, check_events from common.tools.object_factory.Connection import json_connection_id from common.tools.object_factory.Device import json_device_id @@ -23,7 +24,6 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from context.client.EventsCollector import EventsCollector -from context.proto.context_pb2 import ContextId, Empty from .Objects import ( CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME) diff --git a/tutorial/2-4-ecoc22.md b/tutorial/2-4-ecoc22.md index b28fdfd1c..6fc9333b5 100644 --- 
a/tutorial/2-4-ecoc22.md +++ b/tutorial/2-4-ecoc22.md @@ -29,6 +29,11 @@ environment as described in [Tutorial: Run Experiments Guide > 2.1. Configure Python Environment](./2-1-python-environment.md). Remember to source the scenario settings appropriately, e.g., `cd ~/tfs-ctrl && source my_deploy.sh` in each terminal you open. +Next, remember to source the environment variables created by the deployment, e.g., +`cd ~/tfs-ctrl && source tfs_runtime_env_vars.sh`. +Then, re-build the protocol buffers code from the proto files: +`./proto/generate_code_python.sh` + ## 2.4.4. Access to the WebUI and Dashboard -- GitLab From c926393733b058a345b1a288ebca9f2852281129 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Tue, 2 Aug 2022 14:47:24 +0000 Subject: [PATCH 16/91] Slice component: - refined create/update of slices - implemented delete of slices --- src/slice/requirements.in | 1 + src/slice/service/SliceServiceServicerImpl.py | 134 ++++++++++++------ 2 files changed, 89 insertions(+), 46 deletions(-) diff --git a/src/slice/requirements.in b/src/slice/requirements.in index e69de29bb..cbf07ecb7 100644 --- a/src/slice/requirements.in +++ b/src/slice/requirements.in @@ -0,0 +1 @@ +deepdiff==5.8.* diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 275a20114..8c70b5e5a 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -12,11 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import grpc, json, logging +import grpc, logging, deepdiff from common.proto.context_pb2 import ( - ConfigActionEnum, Empty, Service, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum) + Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum) from common.proto.slice_pb2_grpc import SliceServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method +from common.tools.grpc.ConfigRules import copy_config_rules +from common.tools.grpc.Constraints import copy_constraints +from common.tools.grpc.EndPointIds import copy_endpoint_ids +from common.tools.grpc.ServiceIds import update_service_ids +from common.tools.grpc.Tools import grpc_message_to_json from context.client.ContextClient import ContextClient from interdomain.client.InterdomainClient import InterdomainClient from service.client.ServiceClient import ServiceClient @@ -34,66 +39,69 @@ class SliceServiceServicerImpl(SliceServiceServicer): def create_update(self, request : Slice) -> SliceId: context_client = ContextClient() - - slice_id = context_client.SetSlice(request) - if len(request.slice_endpoint_ids) != 2: return slice_id + try: + _slice = context_client.GetSlice(request.slice_id) + json_current_slice = grpc_message_to_json(_slice) + except: + json_current_slice = {} + slice_request = Slice() + slice_request.slice_id.CopyFrom(request.slice_id) + slice_request.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED + context_client.SetSlice(slice_request) + _slice = context_client.GetSlice(request.slice_id) + slice_request = Slice() + slice_request.CopyFrom(_slice) + + LOGGER.info('json_current_slice = {:s}'.format(str(json_current_slice))) + json_updated_slice = grpc_message_to_json(request) + LOGGER.info('json_updated_slice = {:s}'.format(str(json_updated_slice))) + changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice) + LOGGER.info('changes = {:s}'.format(str(changes))) domains = 
set() for slice_endpoint_id in request.slice_endpoint_ids: device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - domains.add(device_uuid.split('@')[1]) + device_parts = device_uuid.split('@') + domain_uuid = '' if len(device_parts) == 1 else device_parts[1] + domains.add(domain_uuid) + LOGGER.info('domains = {:s}'.format(str(domains))) + is_multi_domain = len(domains) > 1 + LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain))) - is_multi_domain = len(domains) == 2 if is_multi_domain: interdomain_client = InterdomainClient() slice_id = interdomain_client.RequestSlice(request) else: - # pylint: disable=no-member - service_request = Service() - service_request.service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid - service_request.service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L3NM - service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED + service_id = ServiceId() + context_uuid = service_id.context_id.context_uuid.uuid = request.slice_id.context_id.context_uuid.uuid + slice_uuid = service_uuid = service_id.service_uuid.uuid = request.slice_id.slice_uuid.uuid service_client = ServiceClient() - service_reply = service_client.CreateService(service_request) - if service_reply != service_request.service_id: # pylint: disable=no-member - raise Exception('Service creation failed. 
Wrong Service Id was returned') - - config_rule = service_request.service_config.config_rules.add() - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - config_rule.custom.resource_key = '/settings' - config_rule.custom.resource_value = json.dumps( - {'mtu': 1512, 'address_families': ['IPV4'], 'bgp_as': 65000, 'bgp_route_target': '65000:333'}, - sort_keys=True) - - for slice_endpoint_id in request.slice_endpoint_ids: - device_uuid = slice_endpoint_id.device_id.device_uuid.uuid - endpoint_uuid = slice_endpoint_id.endpoint_uuid.uuid - - endpoint_id = service_request.service_endpoint_ids.add() - endpoint_id.device_id.device_uuid.uuid = device_uuid - endpoint_id.endpoint_uuid.uuid = endpoint_uuid - - config_rule = service_request.service_config.config_rules.add() - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - config_rule.custom.resource_key = '/device[{:s}]/endpoint[{:s}]/settings'.format( - device_uuid, endpoint_uuid) - config_rule.custom.resource_value = json.dumps( - {'router_id': '0.0.0.0', 'route_distinguisher': '0:0', 'sub_interface_index': 0, 'vlan_id': 0, - 'address_ip': '0.0.0.0', 'address_prefix': 0}, - sort_keys=True) + try: + _service = context_client.GetService(service_id) + except: + service_request = Service() + service_request.service_id.CopyFrom(service_id) + service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED + service_reply = service_client.CreateService(service_request) + if service_reply != service_request.service_id: # pylint: disable=no-member + raise Exception('Service creation failed. 
Wrong Service Id was returned') + _service = context_client.GetService(service_id) + service_request = Service() + service_request.CopyFrom(_service) + + copy_endpoint_ids(request.slice_endpoint_ids, service_request.service_endpoint_ids) + copy_constraints(request.slice_constraints, service_request.service_constraints) + copy_config_rules(request.slice_config.config_rules, service_request.service_config.config_rules) service_reply = service_client.UpdateService(service_request) if service_reply != service_request.service_id: # pylint: disable=no-member raise Exception('Service update failed. Wrong Service Id was returned') - reply = Slice() - reply.CopyFrom(request) - slice_service_id = reply.slice_service_ids.add() - slice_service_id.CopyFrom(service_reply) - context_client.SetSlice(reply) - slice_id = reply.slice_id + update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid) + context_client.SetSlice(slice_request) + slice_id = slice_request.slice_id slice_ = context_client.GetSlice(slice_id) slice_active = Slice() @@ -132,4 +140,38 @@ class SliceServiceServicerImpl(SliceServiceServicer): @safe_and_metered_rpc_method(METRICS, LOGGER) def DeleteSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: + context_client = ContextClient() + try: + _slice = context_client.GetSlice(request.slice_id) + except: + return Empty() + + domains = set() + for slice_endpoint_id in _slice.slice_endpoint_ids: + device_uuid = slice_endpoint_id.device_id.device_uuid.uuid + device_parts = device_uuid.split('@') + domain_uuid = '' if len(device_parts) == 1 else device_parts[1] + domains.add(domain_uuid) + LOGGER.info('domains = {:s}'.format(str(domains))) + is_multi_domain = len(domains) > 1 + LOGGER.info('is_multi_domain = {:s}'.format(str(is_multi_domain))) + + if is_multi_domain: + interdomain_client = InterdomainClient() + #slice_id = interdomain_client.DeleteSlice(request) + raise NotImplementedError('Delete inter-domain slice') + else: + 
current_slice = Slice() + current_slice.CopyFrom(_slice) + current_slice.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT + context_client.SetSlice(current_slice) + + service_client = ServiceClient() + for service_id in _slice.slice_service_ids: + try: + service_client.DeleteService(service_id) + except: + pass + + context_client.RemoveSlice(request.slice_id) return Empty() -- GitLab From e431d4d619ca85209e4a82ab305a1fd24e36ad0e Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Tue, 2 Aug 2022 14:48:06 +0000 Subject: [PATCH 17/91] Service component: - implemented skeleton for L2VPN service handlers for emulated devices --- .../service/service_handlers/__init__.py | 7 + .../L2NMEmulatedServiceHandler.py | 379 ++++++++++++++++++ .../l2nm_emulated/__init__.py | 14 + 3 files changed, 400 insertions(+) create mode 100644 src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py create mode 100644 src/service/service/service_handlers/l2nm_emulated/__init__.py diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 6abe4048f..33e345c42 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -13,11 +13,18 @@ # limitations under the License. 
from ..service_handler_api.FilterFields import FilterFieldEnum, ORM_DeviceDriverEnum, ORM_ServiceTypeEnum +from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler from .tapi_tapi.TapiServiceHandler import TapiServiceHandler SERVICE_HANDLERS = [ + (L2NMEmulatedServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE : ORM_ServiceTypeEnum.L2NM, + FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.UNDEFINED, + } + ]), (L3NMEmulatedServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ORM_ServiceTypeEnum.L3NM, diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py new file mode 100644 index 000000000..889a60ad5 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py @@ -0,0 +1,379 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import anytree, json, logging +from typing import Any, Dict, List, Optional, Tuple, Union +from common.orm.Database import Database +from common.orm.HighLevel import get_object +from common.orm.backend.Tools import key_to_str +from common.proto.context_pb2 import Device +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from common.type_checkers.Checkers import chk_length, chk_type +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.service.database.ConfigModel import ORM_ConfigActionEnum, get_config_rules +from service.service.database.ContextModel import ContextModel +from service.service.database.DeviceModel import DeviceModel +from service.service.database.ServiceModel import ServiceModel +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value + +LOGGER = logging.getLogger(__name__) + +class L2NMEmulatedServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, db_service : ServiceModel, database : Database, context_client : ContextClient, + device_client : DeviceClient, **settings + ) -> None: + self.__db_service = db_service + self.__database = database + self.__context_client = context_client # pylint: disable=unused-private-member + self.__device_client = device_client + + self.__db_context : ContextModel = get_object(self.__database, ContextModel, self.__db_service.context_fk) + str_service_key = key_to_str([self.__db_context.context_uuid, self.__db_service.service_uuid]) + db_config = get_config_rules(self.__database, str_service_key, 'running') + self.__resolver = anytree.Resolver(pathattr='name') + self.__config = TreeNode('.') + for action, resource_key, resource_value in db_config: + if action == ORM_ConfigActionEnum.SET: + try: + resource_value = 
json.loads(resource_value) + except: # pylint: disable=bare-except + pass + set_subnode_value(self.__resolver, self.__config, resource_key, resource_value) + elif action == ORM_ConfigActionEnum.DELETE: + delete_subnode(self.__resolver, self.__config, resource_key) + + def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + service_uuid = self.__db_service.service_uuid + service_short_uuid = service_uuid.split('-')[-1] + network_instance_name = '{:s}-NetInst'.format(service_short_uuid) + network_interface_desc = '{:s}-NetIf'.format(service_uuid) + network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) + + settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None) + if settings is None: raise Exception('Unable to retrieve service settings') + json_settings : Dict = settings.value + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + bgp_as = json_settings.get('bgp_as', 0 ) # 65000 + bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + + results = [] + for endpoint in endpoints: + try: + chk_type('endpoint', endpoint, (tuple, list)) + chk_length('endpoint', endpoint, min_length=2, max_length=3) + if len(endpoint) == 2: + device_uuid, endpoint_uuid = endpoint + else: + device_uuid, endpoint_uuid, _ = endpoint # ignore topology_uuid by now + + endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid) + endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None) + if endpoint_settings is None: + raise Exception('Unable to retrieve service settings for endpoint({:s})'.format( + str(endpoint_settings_uri))) + json_endpoint_settings : Dict = endpoint_settings.value + #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # 
'10.95.0.10' + route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' + address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + + db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True) + json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True) + json_device_config : Dict = json_device.setdefault('device_config', {}) + json_device_config_rules : List = json_device_config.setdefault('config_rules', []) + json_device_config_rules.extend([ + json_config_rule_set( + '/network_instance[{:s}]'.format(network_instance_name), { + 'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF', + 'route_distinguisher': route_distinguisher, + #'router_id': router_id, 'address_families': address_families, + }), + json_config_rule_set( + '/interface[{:s}]'.format(endpoint_uuid), { + 'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu, + }), + json_config_rule_set( + '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { + 'name': endpoint_uuid, 'index': sub_interface_index, + 'description': network_subinterface_desc, 'vlan_id': vlan_id, + 'address_ip': address_ip, 'address_prefix': address_prefix, + }), + json_config_rule_set( + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { + 'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid, + 'subinterface': sub_interface_index, + }), + json_config_rule_set( + '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { + 'name': network_instance_name, 
'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as, + }), + json_config_rule_set( + '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', + }), + json_config_rule_set( + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( + network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', + }), + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { + 'policy_name': '{:s}_import'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', + }), + json_config_rule_set( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name), + }), + 
json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { + 'policy_name': '{:s}_export'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', + }), + json_config_rule_set( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name), + }), + ]) + self.__device_client.ConfigureDevice(Device(**json_device)) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) + results.append(e) + + return results + + def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + service_uuid = self.__db_service.service_uuid + service_short_uuid = service_uuid.split('-')[-1] + network_instance_name = '{:s}-NetInst'.format(service_short_uuid) + + settings : TreeNode = 
get_subnode(self.__resolver, self.__config, '/settings', None) + if settings is None: raise Exception('Unable to retrieve service settings') + json_settings : Dict = settings.value + bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + + results = [] + for endpoint in endpoints: + try: + chk_type('endpoint', endpoint, (tuple, list)) + chk_length('endpoint', endpoint, min_length=2, max_length=3) + if len(endpoint) == 2: + device_uuid, endpoint_uuid = endpoint + else: + device_uuid, endpoint_uuid, _ = endpoint # ignore topology_uuid by now + + endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid) + endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None) + if endpoint_settings is None: + raise Exception('Unable to retrieve service settings for endpoint({:s})'.format( + str(endpoint_settings_uri))) + json_endpoint_settings : Dict = endpoint_settings.value + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + + db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True) + json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True) + json_device_config : Dict = json_device.setdefault('device_config', {}) + json_device_config_rules : List = json_device_config.setdefault('config_rules', []) + json_device_config_rules.extend([ + json_config_rule_delete( + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { + 'name': network_instance_name, 'id': if_subif_name, + }), + json_config_rule_delete( + '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { + 'name': endpoint_uuid, 'index': sub_interface_index, + }), + json_config_rule_delete( + 
'/interface[{:s}]'.format(endpoint_uuid), { + 'name': endpoint_uuid, + }), + json_config_rule_delete( + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( + network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', + }), + json_config_rule_delete( + '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', + }), + json_config_rule_delete( + '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { + 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', + }), + json_config_rule_delete( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { + 'policy_name': '{:s}_import'.format(network_instance_name), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + }), + json_config_rule_delete( + # pylint: 
disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { + 'policy_name': '{:s}_export'.format(network_instance_name), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + }), + json_config_rule_delete( + '/network_instance[{:s}]'.format(network_instance_name), { + 'name': network_instance_name + }), + ]) + self.__device_client.ConfigureDevice(Device(**json_device)) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint))) + results.append(e) + + return results + + def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + resource_key, resource_value = resource + resource_value = json.loads(resource_value) + set_subnode_value(self.__resolver, self.__config, resource_key, resource_value) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource))) + results.append(e) + + return results + + def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + resource_key, _ = resource + delete_subnode(self.__resolver, self.__config, resource_key) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource))) + results.append(e) + + return results diff --git a/src/service/service/service_handlers/l2nm_emulated/__init__.py b/src/service/service/service_handlers/l2nm_emulated/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_emulated/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except 
in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + -- GitLab From ce0208b19384c70ec63a3b032bb161cb941c1368 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Tue, 2 Aug 2022 14:49:46 +0000 Subject: [PATCH 18/91] Compute component: - implemented delete of IETF L2VPN services/slices - implemented diversity and availability constraints --- .../nbi_plugins/ietf_l2vpn/L2VPN_Service.py | 42 ++-- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 237 ++++++------------ 2 files changed, 104 insertions(+), 175 deletions(-) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index 23f61b492..ed2f01af1 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -20,6 +20,7 @@ from common.Constants import DEFAULT_CONTEXT_UUID from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, SliceStatusEnum from context.client.ContextClient import ContextClient from service.client.ServiceClient import ServiceClient +from slice.client.SliceClient import SliceClient from .tools.Authentication import HTTP_AUTH from .tools.ContextMethods import get_service, get_slice from .tools.HttpStatusCodes import HTTP_GATEWAYTIMEOUT, HTTP_NOCONTENT, HTTP_OK, HTTP_SERVERERROR @@ -32,11 +33,6 @@ class L2VPN_Service(Resource): LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id))) LOGGER.debug('Request: {:s}'.format(str(request))) - # TODO: HACK ECOC'22, to be corrected - response = jsonify({}) - 
response.status_code = HTTP_OK - return response - try: context_client = ContextClient() @@ -60,7 +56,7 @@ class L2VPN_Service(Resource): raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(request))) + LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(vpn_id))) response = jsonify({'error': str(e)}) response.status_code = HTTP_SERVERERROR return response @@ -70,18 +66,32 @@ class L2VPN_Service(Resource): LOGGER.debug('VPN_Id: {:s}'.format(str(vpn_id))) LOGGER.debug('Request: {:s}'.format(str(request))) - # pylint: disable=no-member - service_id_request = ServiceId() - service_id_request.context_id.context_uuid.uuid = DEFAULT_CONTEXT_UUID - service_id_request.service_uuid.uuid = vpn_id - try: - service_client = ServiceClient() - service_client.DeleteService(service_id_request) - response = jsonify({}) - response.status_code = HTTP_NOCONTENT + context_client = ContextClient() + + target = get_service(context_client, vpn_id) + if target is not None: + if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member + raise Exception('Service retrieval failed. Wrong Service Id was returned') + service_client = ServiceClient() + service_client.DeleteService(target.service_id) + response = jsonify({}) + response.status_code = HTTP_NOCONTENT + return response + + target = get_slice(context_client, vpn_id) + if target is not None: + if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member + raise Exception('Slice retrieval failed. 
Wrong Slice Id was returned') + slice_client = SliceClient() + slice_client.DeleteSlice(target.slice_id) + response = jsonify({}) + response.status_code = HTTP_NOCONTENT + return response + + raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Something went wrong Deleting Service {:s}'.format(str(request))) + LOGGER.exception('Something went wrong Deleting VPN({:s})'.format(str(vpn_id))) response = jsonify({'error': str(e)}) response.status_code = HTTP_SERVERERROR return response diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 2abeade30..401909940 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import time, random -from ctypes import Union -import json, logging -from typing import Dict +import logging, random, time +from typing import Dict, Optional, Union from flask import request from flask.json import jsonify from flask.wrappers import Response from flask_restful import Resource from werkzeug.exceptions import UnsupportedMediaType -from common.proto.context_pb2 import ConfigActionEnum, Service, Slice +from common.proto.context_pb2 import Service, Slice +from common.tools.grpc.ConfigRules import update_config_rule_custom +from common.tools.grpc.Constraints import ( + update_constraint_custom, update_constraint_endpoint_location, update_constraint_endpoint_priority, + update_constraint_sla_availability) +from common.tools.grpc.EndPointIds import update_endpoint_ids from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from service.client.ServiceClient import ServiceClient @@ -31,20 +34,32 @@ from .tools.Authentication import HTTP_AUTH from .tools.ContextMethods import get_service, get_slice from .tools.HttpStatusCodes import HTTP_NOCONTENT, HTTP_SERVERERROR from .tools.Validator import validate_message -from .Constants import BEARER_MAPPINGS, DEFAULT_ADDRESS_FAMILIES, DEFAULT_BGP_AS, DEFAULT_BGP_ROUTE_TARGET, DEFAULT_MTU +from .Constants import ( + BEARER_MAPPINGS, DEFAULT_ADDRESS_FAMILIES, DEFAULT_BGP_AS, DEFAULT_BGP_ROUTE_TARGET, DEFAULT_MTU) LOGGER = logging.getLogger(__name__) -def process_site_network_access(context_client : ContextClient, site_network_access : Dict) -> Service: +def process_site_network_access(context_client : ContextClient, site_id : str, site_network_access : Dict) -> Service: vpn_id = site_network_access['vpn-attachment']['vpn-id'] - cvlan_id = site_network_access['connection']['tagged-interface']['dot1q-vlan-tagged']['cvlan-id'] + encapsulation_type = site_network_access['connection']['encapsulation-type'] + cvlan_id = 
site_network_access['connection']['tagged-interface'][encapsulation_type]['cvlan-id'] + bearer_reference = site_network_access['bearer']['bearer-reference'] - access_priority = site_network_access.get('availability', {}).get('access-priority') - single_active = site_network_access.get('availability', {}).get('single-active') - all_active = site_network_access.get('availability', {}).get('all-active') + + access_priority : Optional[int] = site_network_access.get('availability', {}).get('access-priority') + single_active : bool = len(site_network_access.get('availability', {}).get('single-active', [])) > 0 + all_active : bool = len(site_network_access.get('availability', {}).get('all-active', [])) > 0 + diversity_constraints = site_network_access.get('access-diversity', {}).get('constraints', {}).get('constraint', []) - # TODO: manage targets of constraints, right now, only type of constraint is considered - diversity_constraints = [constraint['constraint-type'] for constraint in diversity_constraints] + raise_if_differs = True + diversity_constraints = { + constraint['constraint-type']:([ + target[0] + for target in constraint['target'].items() + if len(target[1]) == 1 + ][0], raise_if_differs) + for constraint in diversity_constraints + } mapping = BEARER_MAPPINGS.get(bearer_reference) if mapping is None: @@ -57,157 +72,61 @@ def process_site_network_access(context_client : ContextClient, site_network_acc if target is None: target = get_slice (context_client, vpn_id) if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) - # pylint: disable=no-member - endpoint_ids = target.service_endpoint_ids if isinstance(target, Service) else target.slice_endpoint_ids - - for endpoint_id in endpoint_ids: - if endpoint_id.device_id.device_uuid.uuid != device_uuid: continue - if endpoint_id.endpoint_uuid.uuid != endpoint_uuid: continue - break # found, do nothing + if isinstance(target, Service): + endpoint_ids = target.service_endpoint_ids # 
pylint: disable=no-member + config_rules = target.service_config.config_rules # pylint: disable=no-member + constraints = target.service_constraints # pylint: disable=no-member + elif isinstance(target, Slice): + endpoint_ids = target.slice_endpoint_ids # pylint: disable=no-member + config_rules = target.slice_config.config_rules # pylint: disable=no-member + constraints = target.slice_constraints # pylint: disable=no-member else: - # not found, add it - endpoint_id = endpoint_ids.add() - endpoint_id.device_id.device_uuid.uuid = device_uuid - endpoint_id.endpoint_uuid.uuid = endpoint_uuid - - if isinstance(target, Slice): return target - - for config_rule in target.service_config.config_rules: # pylint: disable=no-member - if config_rule.WhichOneof('config_rule') != 'custom': continue - if config_rule.custom.resource_key != '/settings': continue - json_settings = json.loads(config_rule.custom.resource_value) + raise Exception('Target({:s}) not supported'.format(str(target.__class__.__name__))) - if 'mtu' not in json_settings: # missing, add it - json_settings['mtu'] = DEFAULT_MTU - elif json_settings['mtu'] != DEFAULT_MTU: # differs, raise exception - msg = 'Specified MTU({:s}) differs from Service MTU({:s})' - raise Exception(msg.format(str(json_settings['mtu']), str(DEFAULT_MTU))) + endpoint_id = update_endpoint_ids(endpoint_ids, device_uuid, endpoint_uuid) - if 'address_families' not in json_settings: # missing, add it - json_settings['address_families'] = DEFAULT_ADDRESS_FAMILIES - elif json_settings['address_families'] != DEFAULT_ADDRESS_FAMILIES: # differs, raise exception - msg = 'Specified AddressFamilies({:s}) differs from Service AddressFamilies({:s})' - raise Exception(msg.format(str(json_settings['address_families']), str(DEFAULT_ADDRESS_FAMILIES))) - - if 'bgp_as' not in json_settings: # missing, add it - json_settings['bgp_as'] = DEFAULT_BGP_AS - elif json_settings['bgp_as'] != DEFAULT_BGP_AS: # differs, raise exception - msg = 'Specified BgpAs({:s}) 
differs from Service BgpAs({:s})' - raise Exception(msg.format(str(json_settings['bgp_as']), str(DEFAULT_BGP_AS))) - - if 'bgp_route_target' not in json_settings: # missing, add it - json_settings['bgp_route_target'] = DEFAULT_BGP_ROUTE_TARGET - elif json_settings['bgp_route_target'] != DEFAULT_BGP_ROUTE_TARGET: # differs, raise exception - msg = 'Specified BgpRouteTarget({:s}) differs from Service BgpRouteTarget({:s})' - raise Exception(msg.format(str(json_settings['bgp_route_target']), str(DEFAULT_BGP_ROUTE_TARGET))) - - config_rule.custom.resource_value = json.dumps(json_settings, sort_keys=True) - break - else: - # not found, add it - config_rule = target.service_config.config_rules.add() # pylint: disable=no-member - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - config_rule.custom.resource_key = '/settings' - config_rule.custom.resource_value = json.dumps({ - 'mtu' : DEFAULT_MTU, - 'address_families': DEFAULT_ADDRESS_FAMILIES, - 'bgp_as' : DEFAULT_BGP_AS, - 'bgp_route_target': DEFAULT_BGP_ROUTE_TARGET, - }, sort_keys=True) + service_settings_key = '/settings' + update_config_rule_custom(config_rules, service_settings_key, { + 'mtu' : (DEFAULT_MTU, True), + 'address_families': (DEFAULT_ADDRESS_FAMILIES, True), + 'bgp_as' : (DEFAULT_BGP_AS, True), + 'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True), + }) endpoint_settings_key = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid) - for config_rule in target.service_config.config_rules: # pylint: disable=no-member - if config_rule.WhichOneof('config_rule') != 'custom': continue - if config_rule.custom.resource_key != endpoint_settings_key: continue - json_settings = json.loads(config_rule.custom.resource_value) - - if 'router_id' not in json_settings: # missing, add it - json_settings['router_id'] = router_id - elif json_settings['router_id'] != router_id: # differs, raise exception - msg = 'Specified RouterId({:s}) differs from Service RouterId({:s})' - raise 
Exception(msg.format(str(json_settings['router_id']), str(router_id))) - - if 'route_distinguisher' not in json_settings: # missing, add it - json_settings['route_distinguisher'] = route_distinguisher - elif json_settings['route_distinguisher'] != route_distinguisher: # differs, raise exception - msg = 'Specified RouteDistinguisher({:s}) differs from Service RouteDistinguisher({:s})' - raise Exception(msg.format(str(json_settings['route_distinguisher']), str(route_distinguisher))) - - if 'sub_interface_index' not in json_settings: # missing, add it - json_settings['sub_interface_index'] = sub_if_index - elif json_settings['sub_interface_index'] != sub_if_index: # differs, raise exception - msg = 'Specified SubInterfaceIndex({:s}) differs from Service SubInterfaceIndex({:s})' - raise Exception(msg.format( - str(json_settings['sub_interface_index']), str(sub_if_index))) - - if 'vlan_id' not in json_settings: # missing, add it - json_settings['vlan_id'] = cvlan_id - elif json_settings['vlan_id'] != cvlan_id: # differs, raise exception - msg = 'Specified VLANId({:s}) differs from Service VLANId({:s})' - raise Exception(msg.format( - str(json_settings['vlan_id']), str(cvlan_id))) - - if address_ip is not None: - if 'address_ip' not in json_settings: # missing, add it - json_settings['address_ip'] = address_ip - elif json_settings['address_ip'] != address_ip: # differs, raise exception - msg = 'Specified AddressIP({:s}) differs from Service AddressIP({:s})' - raise Exception(msg.format( - str(json_settings['address_ip']), str(address_ip))) - - if address_prefix is not None: - if 'address_prefix' not in json_settings: # missing, add it - json_settings['address_prefix'] = address_prefix - elif json_settings['address_prefix'] != address_prefix: # differs, raise exception - msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})' - raise Exception(msg.format( - str(json_settings['address_prefix']), str(address_prefix))) - - if address_prefix is not 
None: - if 'address_prefix' not in json_settings: # missing, add it - json_settings['address_prefix'] = address_prefix - elif json_settings['address_prefix'] != address_prefix: # differs, raise exception - msg = 'Specified AddressPrefix({:s}) differs from Service AddressPrefix({:s})' - raise Exception(msg.format( - str(json_settings['address_prefix']), str(address_prefix))) - - config_rule.custom.resource_value = json.dumps(json_settings, sort_keys=True) - break - else: - # not found, add it - config_rule = target.service_config.config_rules.add() # pylint: disable=no-member - config_rule.action = ConfigActionEnum.CONFIGACTION_SET - resource_value = { - 'router_id': router_id, - 'route_distinguisher': route_distinguisher, - 'sub_interface_index': sub_if_index, - 'vlan_id': cvlan_id, - 'address_ip': address_ip, - 'address_prefix': address_prefix, - } - if access_priority is not None: resource_value['access_priority'] = access_priority - if single_active is not None and len(single_active) > 0: resource_value['access_active'] = 'single' - if all_active is not None and len(all_active) > 0: resource_value['access_active'] = 'all' - config_rule.custom.resource_key = endpoint_settings_key - config_rule.custom.resource_value = json.dumps(resource_value, sort_keys=True) - - for constraint in target.service_constraints: # pylint: disable=no-member - if constraint.constraint_type == 'diversity' and len(diversity_constraints) > 0: - constraint_value = set(json.loads(constraint.constraint_value)) - constraint_value.update(diversity_constraints) - constraint.constraint_value = json.dumps(sorted(list(constraint_value)), sort_keys=True) - break - else: - # not found, and there are diversity constraints, add them - if len(diversity_constraints) > 0: - constraint = target.service_constraints.add() # pylint: disable=no-member - constraint.constraint_type = 'diversity' - constraint.constraint_value = json.dumps(sorted(list(diversity_constraints)), sort_keys=True) + field_updates = { + 
'router_id' : (router_id, True), + 'route_distinguisher': (route_distinguisher, True), + 'sub_interface_index': (sub_if_index, True), + 'vlan_id' : (cvlan_id, True), + } + if address_ip is not None: field_updates['address_ip' ] = (address_ip, True) + if address_prefix is not None: field_updates['address_prefix' ] = (address_prefix, True) + update_config_rule_custom(config_rules, endpoint_settings_key, field_updates) + + field_updates = {} + if len(diversity_constraints) > 0: + field_updates.update(diversity_constraints) + update_constraint_custom(constraints, 'diversity', field_updates) + + update_constraint_endpoint_location(constraints, endpoint_id, region=site_id) + if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority) + if single_active or all_active: + # assume 1 disjoint path per endpoint/location included in service/slice + location_endpoints = {} + for constraint in constraints: + if constraint.WhichOneof('constraint') != 'endpoint_location': continue + str_endpoint_id = grpc_message_to_json_string(constraint.endpoint_location.endpoint_id) + str_location_id = grpc_message_to_json_string(constraint.endpoint_location.location) + location_endpoints.setdefault(str_location_id, set()).add(str_endpoint_id) + num_endpoints_per_location = {len(endpoints) for endpoints in location_endpoints.values()} + num_disjoint_paths = min(num_endpoints_per_location) + update_constraint_sla_availability(constraints, num_disjoint_paths, all_active) return target def process_list_site_network_access( - context_client : ContextClient, service_client : ServiceClient, slice_client : SliceClient, + context_client : ContextClient, service_client : ServiceClient, slice_client : SliceClient, site_id : str, request_data : Dict ) -> Response: @@ -216,7 +135,7 @@ def process_list_site_network_access( errors = [] for site_network_access in request_data['ietf-l2vpn-svc:site-network-access']: - sna_request = 
process_site_network_access(context_client, site_network_access) + sna_request = process_site_network_access(context_client, site_id, site_network_access) LOGGER.debug('sna_request = {:s}'.format(grpc_message_to_json_string(sna_request))) try: if isinstance(sna_request, Service): @@ -230,7 +149,7 @@ def process_list_site_network_access( else: raise NotImplementedError('Support for Class({:s}) not implemented'.format(str(type(sna_request)))) except Exception as e: # pylint: disable=broad-except - msg = 'Something went wrong Updating Service {:s}' + msg = 'Something went wrong Updating VPN {:s}' LOGGER.exception(msg.format(grpc_message_to_json_string(sna_request))) errors.append({'error': str(e)}) time.sleep(random.random() / 10.0) @@ -247,7 +166,7 @@ class L2VPN_SiteNetworkAccesses(Resource): context_client = ContextClient() service_client = ServiceClient() slice_client = SliceClient() - return process_list_site_network_access(context_client, service_client, slice_client, request.json) + return process_list_site_network_access(context_client, service_client, slice_client, site_id, request.json) @HTTP_AUTH.login_required def put(self, site_id : str): @@ -256,4 +175,4 @@ class L2VPN_SiteNetworkAccesses(Resource): context_client = ContextClient() service_client = ServiceClient() slice_client = SliceClient() - return process_list_site_network_access(context_client, service_client, slice_client, request.json) + return process_list_site_network_access(context_client, service_client, slice_client, site_id, request.json) -- GitLab From bce8d76a39739ada1a03f4a6c75a6b9cf8facc5a Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Fri, 5 Aug 2022 15:01:10 +0000 Subject: [PATCH 19/91] Service component: - partial integration of PathComp with Service - ongoing work --- scripts/run_tests_locally-service.sh | 1 + .../rpc_method_wrapper/ServiceExceptions.py | 8 ++ src/service/Dockerfile | 1 + src/service/service/DependencyResolver.py | 73 ++++++++++ 
.../service/ServiceServiceServicerImpl.py | 129 +++++++++--------- src/service/service/Tools.py | 20 ++- src/service/service/__main__.py | 10 +- .../tests/test_unitary_dependency_resolver.py | 98 +++++++++++++ 8 files changed, 273 insertions(+), 67 deletions(-) create mode 100644 src/service/service/DependencyResolver.py create mode 100644 src/service/tests/test_unitary_dependency_resolver.py diff --git a/scripts/run_tests_locally-service.sh b/scripts/run_tests_locally-service.sh index 8a2a8d0be..e2ccc3ebe 100755 --- a/scripts/run_tests_locally-service.sh +++ b/scripts/run_tests_locally-service.sh @@ -21,4 +21,5 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc # Run unitary tests and analyze coverage of code at same time coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ + service/tests/test_unitary_dependency_resolver.py \ service/tests/test_unitary.py diff --git a/src/common/rpc_method_wrapper/ServiceExceptions.py b/src/common/rpc_method_wrapper/ServiceExceptions.py index f4f0a64ca..e8d5c79ac 100644 --- a/src/common/rpc_method_wrapper/ServiceExceptions.py +++ b/src/common/rpc_method_wrapper/ServiceExceptions.py @@ -56,3 +56,11 @@ class OperationFailedException(ServiceException): details = 'Operation({:s}) failed'.format(str(operation)) super().__init__(grpc.StatusCode.INTERNAL, details, extra_details=extra_details) + +class NotImplementedException(ServiceException): + def __init__( + self, operation : str, extra_details : Union[str, Iterable[str]] = None + ) -> None: + + details = 'Operation({:s}) not implemented'.format(str(operation)) + super().__init__(grpc.StatusCode.UNIMPLEMENTED, details, extra_details=extra_details) diff --git a/src/service/Dockerfile b/src/service/Dockerfile index c53a89782..e469898e5 100644 --- a/src/service/Dockerfile +++ b/src/service/Dockerfile @@ -64,6 +64,7 @@ RUN python3 -m pip install -r requirements.txt WORKDIR /var/teraflow COPY src/context/. context/ COPY src/device/. device/ +COPY src/pathcomp/frontend/. 
pathcomp/frontend/ COPY src/service/. service/ # Start the service diff --git a/src/service/service/DependencyResolver.py b/src/service/service/DependencyResolver.py new file mode 100644 index 000000000..0bf5923c8 --- /dev/null +++ b/src/service/service/DependencyResolver.py @@ -0,0 +1,73 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import graphlib +from enum import Enum +from typing import Dict, List, Optional, Tuple, Union +from common.proto.context_pb2 import Connection, ConnectionId, Service, ServiceId +from common.proto.pathcomp_pb2 import PathCompReply + +# Compose Directed Acyclic Graph of dependencies between connections and services +# retrieved by PathComp to create them in the appropriate order. 
+ +class ObjectType(Enum): + CONNECTION = 'connection' + SERVICE = 'service' + +ObjectKey = Tuple[ObjectType, str] +ObjectId = Union[ConnectionId, ServiceId] +ObjectData = Union[Connection, Service] +ObjectItem = Tuple[ObjectId, Optional[ObjectData]] +ObjectDict = Dict[ObjectKey, ObjectItem] +Resolution = List[Tuple[ObjectKey, ObjectItem]] + +def get_connection_key(connection_id : ConnectionId) -> ObjectKey: + connection_uuid = connection_id.connection_uuid.uuid + return ObjectType.CONNECTION.value, connection_uuid + +def get_service_key(service_id : ServiceId) -> ObjectKey: + context_uuid = service_id.context_id.context_uuid.uuid + service_uuid = service_id.service_uuid.uuid + return ObjectType.SERVICE.value, '/'.join([context_uuid, service_uuid]) + +def resolve_dependencies(pathcomp_reply : PathCompReply) -> Resolution: + dag = graphlib.TopologicalSorter() + objects : ObjectDict = dict() + + for service in pathcomp_reply.services: + service_key = get_service_key(service.service_id) + objects[service_key] = (service.service_id, service) + + for connection in pathcomp_reply.connections: + connection_key = get_connection_key(connection.connection_id) + objects[connection_key] = (connection.connection_id, connection) + + # the connection's service depends on the connection + service_key = get_service_key(connection.service_id) + dag.add(service_key, connection_key) + if service_key not in objects: objects[service_key] = (connection.service_id, None) + + # the connection depends on these sub-services + for sub_service_id in connection.sub_service_ids: + sub_service_key = get_service_key(sub_service_id) + dag.add(connection_key, sub_service_key) + if sub_service_key not in objects: objects[sub_service_key] = (sub_service_id, None) + + resolution : Resolution = list() + for item_key in dag.static_order(): + item_tuple = objects.get(item_key) + resolution.append((item_key, item_tuple)) + + return resolution diff --git a/src/service/service/ServiceServiceServicerImpl.py 
b/src/service/service/ServiceServiceServicerImpl.py index 6355cafbe..2591f5bda 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -13,24 +13,29 @@ # limitations under the License. from typing import Dict, List -import grpc, json, logging +import graphlib, grpc, json, logging from common.orm.Database import Database from common.orm.HighLevel import get_object from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import Empty, Service, ServiceId +from common.proto.context_pb2 import ConnectionId, Empty, Service, ServiceId +from common.proto.pathcomp_pb2 import PathCompRequest from common.proto.service_pb2_grpc import ServiceServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException +from common.rpc_method_wrapper.ServiceExceptions import AlreadyExistsException, InvalidArgumentException, NotFoundException, NotImplementedException from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Service import json_service_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient +from pathcomp.frontend.client.PathCompClient import PathCompClient +from service.service.DependencyResolver import ObjectType, resolve_dependencies from service.service.database.DeviceModel import DeviceModel from .database.DatabaseServiceTools import ( sync_service_from_context, sync_service_to_context, update_service_in_local_database) from .database.ServiceModel import ServiceModel from .path_computation_element.PathComputationElement import PathComputationElement, dump_connectivity from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory -from .Tools 
import delete_service, sync_devices_from_context, update_service +from .Tools import delete_service, get_connection, get_service, sync_devices_from_context, update_service LOGGER = logging.getLogger(__name__) @@ -41,8 +46,6 @@ METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) class ServiceServiceServicerImpl(ServiceServiceServicer): def __init__(self, database : Database, service_handler_factory : ServiceHandlerFactory) -> None: LOGGER.debug('Creating Servicer...') - self.context_client = ContextClient() - self.device_client = DeviceClient() self.database = database self.service_handler_factory = service_handler_factory LOGGER.debug('Servicer Created') @@ -84,14 +87,19 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): extra_details='RPC method CreateService does not accept Config Rules. '\ 'Config Rules should be configured after creating the service.') - sync_service_from_context(service_context_uuid, service_uuid, self.context_client, self.database) - db_service,_ = update_service_in_local_database(self.database, request) + # check that service does not exist + context_client = ContextClient() + current_service = get_service(context_client, request.service_id) + if current_service is not None: + context_uuid = request.service_id.context_id.context_uuid.uuid + service_uuid = request.service_id.service_uuid.uuid + raise AlreadyExistsException( + 'Service', service_uuid, extra_details='context_uuid={:s}'.format(str(context_uuid))) - LOGGER.info('[CreateService] db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - - sync_service_to_context(db_service, self.context_client) - return ServiceId(**db_service.dump_id()) + # just create the service in the database to lock the service_id + # update will perform changes on the resources + service_id = context_client.SetService(request) + return service_id @safe_and_metered_rpc_method(METRICS, LOGGER) def UpdateService(self, request 
: Service, context : grpc.ServicerContext) -> ServiceId: @@ -101,54 +109,53 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): service_uuid = service_id.service_uuid.uuid service_context_uuid = service_id.context_id.context_uuid.uuid - pce = PathComputationElement() - pce.load_topology(self.context_client) - pce.load_connectivity(self.context_client, service_id) - #pce.dump_topology_to_file('../data/topo.dot') - #pce.dump_connectivity_to_file('../data/conn-before.txt') - connectivity = pce.route_service(request) - #pce.dump_connectivity_to_file('../data/conn-after.txt') - - LOGGER.info('[UpdateService] connectivity = {:s}'.format(str(dump_connectivity(connectivity)))) - - if connectivity is None: - # just update local database and context - str_service_key = key_to_str([service_context_uuid, service_uuid]) - db_service = get_object(self.database, ServiceModel, str_service_key, raise_if_not_found=False) - LOGGER.info('[UpdateService] before db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - db_devices : Dict[str, DeviceModel] = sync_devices_from_context( - self.context_client, self.database, db_service, request.service_endpoint_ids) - LOGGER.info('[UpdateService] db_devices[{:d}] = {:s}'.format( - len(db_devices), str({ - device_uuid:db_device.dump(include_config_rules=True, include_drivers=True, include_endpoints=True) - for device_uuid,db_device in db_devices.items() - }))) - sync_service_from_context(service_context_uuid, service_uuid, self.context_client, self.database) - db_service,_ = update_service_in_local_database(self.database, request) - LOGGER.info('[UpdateService] after db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - sync_service_to_context(db_service, self.context_client) - else: - for sub_service, sub_connections in connectivity.get('requirements', []): - for sub_connection 
in sub_connections: - update_service( - self.database, self.context_client, self.device_client, self.service_handler_factory, - sub_service, sub_connection) - - for connection in connectivity.get('connections'): - db_service = update_service( - self.database, self.context_client, self.device_client, self.service_handler_factory, - request, connection) - - str_service_key = key_to_str([service_context_uuid, service_uuid]) - db_service = get_object(self.database, ServiceModel, str_service_key, raise_if_not_found=False) - if db_service is None: raise NotFoundException('Service', str_service_key) - - LOGGER.info('[UpdateService] db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - - return ServiceId(**db_service.dump_id()) + pathcomp_request = PathCompRequest() + pathcomp_request.services.append(request) + pathcomp_request.services.k_shortest_path.k_inspection = 5 + pathcomp_request.services.k_shortest_path.k_return = 5 + + pathcomp = PathCompClient() + pathcomp_response = pathcomp.Compute(pathcomp_request) + + # convert from a unordered lists of services and connections to a list of ordered items + # that fulfill interdependencies among them. E.g., a service cannot be created if connections + # supporting that service still does not exist. 
+ resolution = resolve_dependencies(pathcomp_response) + + # implement changes + context_client = ContextClient() + device_client = DeviceClient() + for (obj_type, obj_key), (grpc_objid, grpc_obj) in resolution: + if grpc_obj is None: + # check if the resource already exists + if obj_type == ObjectType.CONNECTION.value: + connection = get_connection(context_client, grpc_objid) + if connection is None: raise NotFoundException('Connection', obj_key) + elif obj_type == ObjectType.SERVICE.value: + service = get_service(context_client, grpc_objid) + if service is None: raise NotFoundException('Service', obj_key) + else: + MSG_EXTRA_DETAILS = 'obj_type={:s} obj_key={:s} grpc_objid={:s} grpc_obj={:s}' + str_grpc_obj = 'None' if grpc_obj is None else grpc_message_to_json_string(grpc_obj) + str_grpc_objid = 'None' if grpc_objid is None else grpc_message_to_json_string(grpc_objid) + msg_extra_details = MSG_EXTRA_DETAILS.format(obj_type, obj_key, str_grpc_objid, str_grpc_obj) + raise NotImplementedException('Empty Dependency', extra_details=msg_extra_details) + else: + # create/update the resource + if obj_type == ObjectType.CONNECTION.value: + update_connection(context_client, device_client, self.service_handler_factory, grpc_obj) + context_client.SetConnection(grpc_obj) + elif obj_type == ObjectType.SERVICE.value: + update_service(context_client, device_client, self.service_handler_factory, grpc_obj) + context_client.SetService(grpc_obj) + else: + MSG_EXTRA_DETAILS = 'obj_type={:s} obj_key={:s} grpc_objid={:s} grpc_obj={:s}' + str_grpc_obj = 'None' if grpc_obj is None else grpc_message_to_json_string(grpc_obj) + str_grpc_objid = 'None' if grpc_objid is None else grpc_message_to_json_string(grpc_objid) + msg_extra_details = MSG_EXTRA_DETAILS.format(obj_type, obj_key, str_grpc_objid, str_grpc_obj) + raise NotImplementedException('Specified Dependency', extra_details=msg_extra_details) + + return request.service_id @safe_and_metered_rpc_method(METRICS, LOGGER) def 
DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: diff --git a/src/service/service/Tools.py b/src/service/service/Tools.py index 4386793c5..ea4369fd5 100644 --- a/src/service/service/Tools.py +++ b/src/service/service/Tools.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import grpc, logging from typing import Any, Dict, List, Optional, Tuple from common.orm.Database import Database from common.orm.HighLevel import get_object, get_related_objects from common.orm.backend.Tools import key_to_str from common.proto.context_pb2 import ( - ConfigRule, Connection, Constraint, EndPointId, Service, ServiceId, ServiceStatusEnum) + ConfigRule, Connection, ConnectionId, Constraint, EndPointId, Service, ServiceId, ServiceStatusEnum) from common.rpc_method_wrapper.ServiceExceptions import ( InvalidArgumentException, NotFoundException, OperationFailedException) from context.client.ContextClient import ContextClient @@ -42,6 +42,22 @@ from .service_handler_api.Tools import ( LOGGER = logging.getLogger(__name__) +def get_connection(context_client : ContextClient, connection_id : ConnectionId) -> Optional[Connection]: + try: + connection : Connection = context_client.GetConnection(connection_id) + return connection + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + return None + +def get_service(context_client : ContextClient, service_id : ServiceId) -> Optional[Service]: + try: + service : Service = context_client.GetService(service_id) + return service + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + return None + def sync_devices_from_context( context_client : ContextClient, database : Database, db_service : Optional[ServiceModel], service_endpoint_ids : List[EndPointId] diff --git a/src/service/service/__main__.py 
b/src/service/service/__main__.py index 1a67a309f..2c042fe0e 100644 --- a/src/service/service/__main__.py +++ b/src/service/service/__main__.py @@ -37,10 +37,12 @@ def main(): LOGGER = logging.getLogger(__name__) wait_for_environment_variables([ - get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), - get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), - get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST ), - get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.DEVICE, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) signal.signal(signal.SIGINT, signal_handler) diff --git a/src/service/tests/test_unitary_dependency_resolver.py b/src/service/tests/test_unitary_dependency_resolver.py new file mode 100644 index 000000000..1dd70ba7e --- /dev/null +++ b/src/service/tests/test_unitary_dependency_resolver.py @@ -0,0 +1,98 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, operator +from common.proto.context_pb2 import Connection, Service +from common.proto.pathcomp_pb2 import PathCompReply +from common.tools.grpc.Tools import grpc_message_to_json_string +from service.service.DependencyResolver import resolve_dependencies + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_dependency_resolver(): + # test: add services and connections that depend on each other + # then, check if they are properly resolved. + # - service MAIN, depends on connection PKT-1, TAPI, and PKT-2 + # - connection PKT-1, depends on nothing + # - connection TAPI, depends on service TAPI-1 and TAPI-2 + # - connection PKT-2, depends on nothing + # - service TAPI-1, depends on connection TAPI-1 + # - service TAPI-2, depends on connection TAPI-2 + + pathcomp_reply = PathCompReply() + + service_main = pathcomp_reply.services.add() + service_main.service_id.context_id.context_uuid.uuid = 'admin' + service_main.service_id.service_uuid.uuid = 'MAIN' + + service_tapi1 = pathcomp_reply.services.add() + service_tapi1.service_id.context_id.context_uuid.uuid = 'admin' + service_tapi1.service_id.service_uuid.uuid = 'TAPI-1' + + service_tapi2 = pathcomp_reply.services.add() + service_tapi2.service_id.context_id.context_uuid.uuid = 'admin' + service_tapi2.service_id.service_uuid.uuid = 'TAPI-2' + + connection_pkt1 = pathcomp_reply.connections.add() + connection_pkt1.connection_id.connection_uuid.uuid = 'PKT-1' + connection_pkt1.service_id.CopyFrom(service_main.service_id) + + connection_tapi = pathcomp_reply.connections.add() + connection_tapi.connection_id.connection_uuid.uuid = 'TAPI' + connection_tapi.service_id.CopyFrom(service_main.service_id) + + connection_pkt2 = pathcomp_reply.connections.add() + connection_pkt2.connection_id.connection_uuid.uuid = 'PKT-2' + connection_pkt2.service_id.CopyFrom(service_main.service_id) + + connection_tapi1 = pathcomp_reply.connections.add() + 
connection_tapi1.connection_id.connection_uuid.uuid = 'TAPI-1' + connection_tapi1.service_id.CopyFrom(service_tapi1.service_id) + connection_tapi.sub_service_ids.append(service_tapi1.service_id) + + connection_tapi2 = pathcomp_reply.connections.add() + connection_tapi2.connection_id.connection_uuid.uuid = 'TAPI-2' + connection_tapi2.service_id.CopyFrom(service_tapi2.service_id) + connection_tapi.sub_service_ids.append(service_tapi2.service_id) + + LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) + resolution = resolve_dependencies(pathcomp_reply) + LOGGER.info('resolution={:s}'.format(str(list(map(operator.itemgetter(0), resolution))))) + + CORRECT_RESOLUTION_KEYS = [ + ('connection', 'PKT-1' ), + ('connection', 'PKT-2' ), + ('connection', 'TAPI-1' ), + ('connection', 'TAPI-2' ), + ('service' , 'admin/TAPI-1'), + ('service' , 'admin/TAPI-2'), + ('connection', 'TAPI' ), + ('service' , 'admin/MAIN' ), + ] + for (resolved_key,(resolved_objid, resolved_obj)),correct_key in zip(resolution, CORRECT_RESOLUTION_KEYS): + assert resolved_key == correct_key + assert resolved_obj is not None + if resolved_key[0] == 'connection': + assert isinstance(resolved_obj, Connection) + assert resolved_objid == resolved_obj.connection_id + connection_key = resolved_obj.connection_id.connection_uuid.uuid + assert resolved_key[1] == connection_key + elif resolved_key[0] == 'service': + assert isinstance(resolved_obj, Service) + assert resolved_objid == resolved_obj.service_id + context_uuid = resolved_obj.service_id.context_id.context_uuid.uuid + service_uuid = resolved_obj.service_id.service_uuid.uuid + service_key = '/'.join([context_uuid, service_uuid]) + assert resolved_key[1] == service_key -- GitLab From 1a6f947ce0b661734e7498aea251a474d7e0dba5 Mon Sep 17 00:00:00 2001 From: Carlos Natalino Date: Mon, 5 Sep 2022 12:04:00 +0200 Subject: [PATCH 20/91] Modifications that enable the use of a single script that builds an image, tests and reports 
coverage for a specific component. --- .gitignore | 1 + run_tests_docker.sh | 59 +++++++++++++++++++++++ scripts/build_run_report_tests_locally.sh | 57 ++++++++++++++++++++++ tutorial/3-2-develop-cth.md | 13 +++++ 4 files changed, 130 insertions(+) create mode 100755 run_tests_docker.sh create mode 100755 scripts/build_run_report_tests_locally.sh diff --git a/.gitignore b/.gitignore index 71b77da25..86434f04b 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ coverage.xml .pytest_cache/ .benchmarks/ cover/ +*_report.xml # Translations *.mo diff --git a/run_tests_docker.sh b/run_tests_docker.sh new file mode 100755 index 000000000..fd8851409 --- /dev/null +++ b/run_tests_docker.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +######################################################################################################################## +# Define your deployment settings here +######################################################################################################################## + +# Set the URL of your local Docker registry where the images will be uploaded to. Leave it blank if you do not want to +# use any Docker registry. +REGISTRY_IMAGE="" +#REGISTRY_IMAGE="http://my-container-registry.local/" + +# Set the list of components you want to build images for, and deploy. +COMPONENTS="context device automation policy service compute monitoring centralizedattackdetector" + +# Set the tag you want to use for your images. +IMAGE_TAG="tf-dev" + +# Constants +TMP_FOLDER="./tmp" + +TMP_LOGS_FOLDER="$TMP_FOLDER/logs" +mkdir -p $TMP_LOGS_FOLDER + +for COMPONENT in $COMPONENTS; do + echo "Processing '$COMPONENT' component..." + IMAGE_NAME="$COMPONENT:$IMAGE_TAG" + IMAGE_URL="$REGISTRY_IMAGE/$IMAGE_NAME" + + echo " Building Docker image..." 
+ BUILD_LOG="$TMP_LOGS_FOLDER/build_${COMPONENT}.log" + + if [ "$COMPONENT" == "automation" ] || [ "$COMPONENT" == "policy" ]; then + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/"$COMPONENT"/ > "$BUILD_LOG" + else + docker build -t "$IMAGE_NAME" -f ./src/"$COMPONENT"/Dockerfile ./src/ > "$BUILD_LOG" + fi + + if [ -n "$REGISTRY_IMAGE" ]; then + echo "Pushing Docker image to '$REGISTRY_IMAGE'..." + + TAG_LOG="$TMP_LOGS_FOLDER/tag_${COMPONENT}.log" + docker tag "$IMAGE_NAME" "$IMAGE_URL" > "$TAG_LOG" + + PUSH_LOG="$TMP_LOGS_FOLDER/push_${COMPONENT}.log" + docker push "$IMAGE_URL" > "$PUSH_LOG" + fi +done + +echo "Preparing for running the tests..." + +if docker network list | grep teraflowbridge; then echo "teraflowbridge is already created"; else docker network create -d bridge teraflowbridge; fi + +for COMPONENT in $COMPONENTS; do + IMAGE_NAME="$COMPONENT:$IMAGE_TAG" + echo " Running tests for $COMPONENT:" + docker run -it -d --name $COMPONENT $IMAGE_NAME --network=teraflowbridge + docker exec -it $COMPONENT bash -c "pytest --log-level=DEBUG --verbose $COMPONENT/tests/test_unitary.py" + docker stop $COMPONENT +done diff --git a/scripts/build_run_report_tests_locally.sh b/scripts/build_run_report_tests_locally.sh new file mode 100755 index 000000000..9bdc81d98 --- /dev/null +++ b/scripts/build_run_report_tests_locally.sh @@ -0,0 +1,57 @@ +#!/bin/bash +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +die () { + echo >&2 "$@" + exit 1 +} + +[ "$#" -eq 1 ] || die "component name required but not provided" + +COMPONENT_NAME=$1 # parameter +IMAGE_NAME="${COMPONENT_NAME}-local" +IMAGE_TAG="latest" + +if docker ps | grep $IMAGE_NAME +then + docker stop $IMAGE_NAME +fi + +if docker network list | grep teraflowbridge +then + echo "teraflowbridge is already created" +else + docker network create -d bridge teraflowbridge +fi + +docker build -t "$IMAGE_NAME:$IMAGE_TAG" -f ./src/$COMPONENT_NAME/Dockerfile . + +docker run --name $IMAGE_NAME -d -v "${PWD}/src/${COMPONENT_NAME}/tests:/home/${COMPONENT_NAME}/results" --network=teraflowbridge --rm $IMAGE_NAME:$IMAGE_TAG + +docker exec -i $IMAGE_NAME bash -c "coverage run -m pytest --log-level=INFO --verbose $COMPONENT_NAME/tests/ --junitxml=/home/${COMPONENT_NAME}/results/${COMPONENT_NAME}_report.xml" + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +RCFILE=$PROJECTDIR/coverage/.coveragerc + +echo +echo "Coverage report:" +echo "----------------" +docker exec -i $IMAGE_NAME bash -c "coverage report --include='${COMPONENT_NAME}/*' --show-missing" + +# docker stop $IMAGE_NAME +docker rm -f $IMAGE_NAME +docker network rm teraflowbridge diff --git a/tutorial/3-2-develop-cth.md b/tutorial/3-2-develop-cth.md index eda70c9e8..1b2a4690a 100644 --- a/tutorial/3-2-develop-cth.md +++ b/tutorial/3-2-develop-cth.md @@ -1,5 +1,18 @@ # 3.2. Development Commands, Tricks, and Hints (WORK IN PROGRESS) +## Building, running, testing and reporting code coverage locally + +The project runs a CI/CD loops that ensures that all tests are run whenever new code is committed to our reporitory. +However, committing and waiting for the pipeline to run can take substantial time. +For this reason, we prepared a script that runs in your local machine, builds the container image and executes the tests within the image. 
+ +To use the script receives one argument that is the name of the component you want to run. +For instance, if you want to build and run the tests of the `compute` component, you can run: + +```shell +scripts/build_run_report_tests_locally.sh compute +``` + ## Items to be addressed: -- GitLab From 8f697710c4001c05483e73261dee1e6b1152ce52 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Wed, 7 Sep 2022 12:53:19 +0000 Subject: [PATCH 21/91] ECOC'22 intermediate backup --- src/tests/ecoc22/deploy_specs.sh | 2 +- src/tests/ecoc22/tests/BuildDescriptors.py | 2 +- src/tests/ecoc22/tests/Fixtures.py | 24 ++ src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py | 223 ++++++++++++++++++ .../{Objects.py => Objects_OldBigNet.py} | 0 .../ecoc22/tests/test_functional_bootstrap.py | 28 +-- .../ecoc22/tests/test_functional_cleanup.py | 46 +--- .../tests/test_functional_create_service.py | 23 +- .../tests/test_functional_delete_service.py | 2 +- 9 files changed, 266 insertions(+), 84 deletions(-) create mode 100644 src/tests/ecoc22/tests/Fixtures.py create mode 100644 src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py rename src/tests/ecoc22/tests/{Objects.py => Objects_OldBigNet.py} (100%) diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh index dc75c4b2c..03d17f428 100644 --- a/src/tests/ecoc22/deploy_specs.sh +++ b/src/tests/ecoc22/deploy_specs.sh @@ -2,7 +2,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device service slice compute" # automation webui +export TFS_COMPONENTS="context device service pathcomp slice compute webui" # automation # Set the tag you want to use for your images. 
export TFS_IMAGE_TAG="dev" diff --git a/src/tests/ecoc22/tests/BuildDescriptors.py b/src/tests/ecoc22/tests/BuildDescriptors.py index 00d09ae8d..fab6f2ceb 100644 --- a/src/tests/ecoc22/tests/BuildDescriptors.py +++ b/src/tests/ecoc22/tests/BuildDescriptors.py @@ -13,7 +13,7 @@ # limitations under the License. import copy, json, sys -from .Objects import CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Objects_OldBigNet import CONTEXTS, DEVICES, LINKS, TOPOLOGIES def main(): with open('tests/ecoc22/descriptors_emulated.json', 'w', encoding='UTF-8') as f: diff --git a/src/tests/ecoc22/tests/Fixtures.py b/src/tests/ecoc22/tests/Fixtures.py new file mode 100644 index 000000000..5c5fd26e0 --- /dev/null +++ b/src/tests/ecoc22/tests/Fixtures.py @@ -0,0 +1,24 @@ +import pytest +from common.Settings import get_setting +from compute.tests.mock_osm.MockOSM import MockOSM +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Objects_DC_CSGW_TN import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def osm_wim(): + wim_url = 'http://{:s}:{:s}'.format( + get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) + return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py new file mode 100644 index 000000000..9422906d3 --- /dev/null +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py @@ -0,0 +1,223 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +#from common.tools.object_factory.Constraint import json_constraint +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import ( + json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, json_device_emulated_packet_router_disabled, json_device_id) +from common.tools.object_factory.EndPoint import json_endpoints +from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id +from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned +from common.tools.object_factory.Topology import json_topology, json_topology_id + +def compose_router(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): + device_id = json_device_id(device_uuid) + r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] + config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) + j_endpoints = [] if with_connect_rules else endpoints + device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) + return device_id, endpoints, device + +def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): + device_id = json_device_id(device_uuid) + r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid 
in endpoint_uuids] + config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) + j_endpoints = [] if with_connect_rules else endpoints + device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) + return device_id, endpoints, device + +def compose_link(endpoint_a, endpoint_z): + link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id']) + link_id = json_link_id(link_uuid) + link = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]) + return link_id, link + +def compose_service(endpoint_a, endpoint_z, constraints=[]): + service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id']) + endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']] + service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints) + return service + +# ----- Context -------------------------------------------------------------------------------------------------------- +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) +CONTEXT = json_context(DEFAULT_CONTEXT_UUID) + +# ----- Domains -------------------------------------------------------------------------------------------------------- +# Overall network topology +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) +TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) + +# DataCenter #1 Network +TOPO_DC1_UUID = 'DC1' +TOPO_DC1_ID = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID) +TOPO_DC1 = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID) + +# DataCenter #2 Network +TOPO_DC2_UUID = 'DC2' +TOPO_DC2_ID = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID) +TOPO_DC2 = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID) + +# CellSite #1 Network +TOPO_CS1_UUID = 'CS1' 
+TOPO_CS1_ID = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID) +TOPO_CS1 = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID) + +# CellSite #2 Network +TOPO_CS2_UUID = 'CS2' +TOPO_CS2_ID = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID) +TOPO_CS2 = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID) + +# Transport Network Network +TOPO_TN_UUID = 'TN' +TOPO_TN_ID = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID) +TOPO_TN = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID) + + +# ----- Devices -------------------------------------------------------------------------------------------------------- +# DataCenters +DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'], topology_id=TOPO_DC1_ID) +DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'], topology_id=TOPO_DC2_ID) + +# CellSites +DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS1_ID) +DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS1_ID) +DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS2_ID) +DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS2_ID) + +# Transport Network +DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_router('TN-R1', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) +DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_router('TN-R2', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) +DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_router('TN-R3', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) +DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_router('TN-R4', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) + + +# ----- Links 
---------------------------------------------------------------------------------------------------------- +# InterDomain DC-CSGW +LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0]) +LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0]) +LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0]) +LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0]) + +# InterDomain CSGW-TN +LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0]) +LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0]) +LINK_CS1GW1_TNR2_ID, LINK_CS1GW1_TNR2 = compose_link(DEV_CS1GW1_EPS[2], DEV_TNR2_EPS[1]) +LINK_CS1GW2_TNR1_ID, LINK_CS1GW2_TNR1 = compose_link(DEV_CS1GW2_EPS[2], DEV_TNR1_EPS[1]) +LINK_CS2GW1_TNR3_ID, LINK_CS2GW1_TNR3 = compose_link(DEV_CS2GW1_EPS[1], DEV_TNR3_EPS[0]) +LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4_EPS[0]) +LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1]) +LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1]) + +# IntraDomain TN +LINK_TNR1_TNR2_ID, LINK_TNR1_TNR2 = compose_link(DEV_TNR1_EPS[2], DEV_TNR2_EPS[3]) +LINK_TNR2_TNR3_ID, LINK_TNR2_TNR3 = compose_link(DEV_TNR2_EPS[2], DEV_TNR3_EPS[3]) +LINK_TNR3_TNR4_ID, LINK_TNR3_TNR4 = compose_link(DEV_TNR3_EPS[2], DEV_TNR4_EPS[3]) +LINK_TNR4_TNR1_ID, LINK_TNR4_TNR1 = compose_link(DEV_TNR4_EPS[2], DEV_TNR1_EPS[3]) +LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4]) +LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4]) + + +# ----- Service -------------------------------------------------------------------------------------------------------- +#SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[ +# 
json_constraint('bandwidth[gbps]', 10.0), +# json_constraint('latency[ms]', 12.0), +#]) + +# ----- WIM Service Settings ------------------------------------------------------------------------------------------- +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' + +def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]): + ce_endpoint_id = ce_endpoint_id['endpoint_id'] + ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] + ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] + pe_device_uuid = pe_device_id['device_uuid']['uuid'] + service_endpoint_id = '{:s}-{:s}-{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid) + bearer = '{:s}-{:s}'.format(ce_device_uuid, pe_device_uuid) + _mapping = { + 'service_endpoint_id': service_endpoint_id, + 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, + 'service_mapping_info': { + 'site-id': site_id, + 'bearer': {'bearer-reference': bearer}, + } + } + if priority is not None: _mapping['service_mapping_info']['priority'] = priority + if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant + return service_endpoint_id, _mapping + +WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1-DC1GW-eth2']) +WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1-DC1GW-eth1']) +WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2-DC2GW-eth2']) +WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2-DC2GW-eth1']) + +WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC] + +WIM_SRV_VLAN_ID = 300 +WIM_SERVICE_TYPE = 'ELAN' +WIM_SERVICE_CONNECTION_POINTS = [ + {'service_endpoint_id': WIM_SEP_DC1_PRI, + 'service_endpoint_encapsulation_type': 'dot1q', + 
'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}}, + {'service_endpoint_id': WIM_SEP_DC2_PRI, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}}, +] + + +# ----- Containers ----------------------------------------------------------------------------------------------------- +CONTEXTS = [ CONTEXT ] +TOPOLOGIES = [ TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ] +DEVICES = [ DEV_DC1GW, DEV_DC2GW, + DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2, + DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4 ] +LINKS = [ LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2, + LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1, + LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3, + LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4 ] +#SERVICES = [ SERVICE_DC1GW_DC2GW ] + +OBJECTS_PER_TOPOLOGY = [ + (TOPO_ADMIN_ID, + [ DEV_DC1GW_ID, DEV_DC2GW_ID, + DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID, + DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID ], + [ LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID, + LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID, + LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID, + LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID, + LINK_TNR2_TNR4_ID ], + ), + (TOPO_DC1_ID, + [DEV_DC1GW_ID], + []), + (TOPO_DC2_ID, + [DEV_DC2GW_ID], + []), + (TOPO_CS1_ID, + [DEV_CS1GW1_ID, DEV_CS1GW2_ID], + []), + (TOPO_CS2_ID, + [DEV_CS2GW1_ID, DEV_CS2GW2_ID], + []), + (TOPO_TN_ID, + [DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID], + [LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID, + LINK_TNR2_TNR4_ID]), +] diff --git a/src/tests/ecoc22/tests/Objects.py 
b/src/tests/ecoc22/tests/Objects_OldBigNet.py similarity index 100% rename from src/tests/ecoc22/tests/Objects.py rename to src/tests/ecoc22/tests/Objects_OldBigNet.py diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py index a787cf049..d4e05f53d 100644 --- a/src/tests/ecoc22/tests/test_functional_bootstrap.py +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -12,32 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, logging, pytest -from common.Settings import get_setting +import copy, logging from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Fixtures import context_client, device_client +#from .Objects_OldBigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -@pytest.fixture(scope='session') -def context_client(): - _client = ContextClient() - yield _client - _client.close() - - -@pytest.fixture(scope='session') -def device_client(): - _client = DeviceClient() - yield _client - _client.close() - - def test_scenario_empty(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure database is empty ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) @@ -90,13 +77,10 @@ def test_devices_bootstraping( context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name # ----- Create Devices and Validate Collected Events --------------------------------------------------------------- - for device, 
connect_rules in DEVICES: + for device in DEVICES: device_uuid = device['device_id']['device_uuid']['uuid'] LOGGER.info('Adding Device {:s}'.format(device_uuid)) - - device_with_connect_rules = copy.deepcopy(device) - device_with_connect_rules['device_config']['config_rules'].extend(connect_rules) - response = device_client.AddDevice(Device(**device_with_connect_rules)) + response = device_client.AddDevice(Device(**device)) assert response.device_uuid.uuid == device_uuid diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py index eb78a5850..8c7ef7885 100644 --- a/src/tests/ecoc22/tests/test_functional_cleanup.py +++ b/src/tests/ecoc22/tests/test_functional_cleanup.py @@ -12,37 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, pytest -from common.Settings import get_setting -from common.tests.EventTools import EVENT_REMOVE, check_events +import logging +from common.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Device import json_device_id -from common.tools.object_factory.Link import json_link_id -from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from context.proto.context_pb2 import ContextId, DeviceId, Empty, LinkId, TopologyId from device.client.DeviceClient import DeviceClient -from .Objects import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Fixtures import context_client, device_client +#from .Objects_OldBigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES + LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -@pytest.fixture(scope='session') -def 
context_client(): - _client = ContextClient(get_setting('CONTEXTSERVICE_SERVICE_HOST'), get_setting('CONTEXTSERVICE_SERVICE_PORT_GRPC')) - yield _client - _client.close() - - -@pytest.fixture(scope='session') -def device_client(): - _client = DeviceClient(get_setting('DEVICESERVICE_SERVICE_HOST'), get_setting('DEVICESERVICE_SERVICE_PORT_GRPC')) - yield _client - _client.close() - - def test_services_removed(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure service is removed ------------------------------------------------------------------ response = context_client.ListContexts(Empty()) @@ -64,19 +47,12 @@ def test_services_removed(context_client : ContextClient): # pylint: disable=re def test_scenario_cleanup( context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name - # ----- Start the EventsCollector ---------------------------------------------------------------------------------- - events_collector = EventsCollector(context_client) - events_collector.start() - - expected_events = [] - # ----- Delete Links and Validate Collected Events ----------------------------------------------------------------- for link in LINKS: link_id = link['link_id'] link_uuid = link_id['link_uuid']['uuid'] LOGGER.info('Deleting Link {:s}'.format(link_uuid)) context_client.RemoveLink(LinkId(**link_id)) - expected_events.append(('LinkEvent', EVENT_REMOVE, json_link_id(link_uuid))) # ----- Delete Devices and Validate Collected Events --------------------------------------------------------------- for device, _ in DEVICES: @@ -84,7 +60,6 @@ def test_scenario_cleanup( device_uuid = device_id['device_uuid']['uuid'] LOGGER.info('Deleting Device {:s}'.format(device_uuid)) device_client.DeleteDevice(DeviceId(**device_id)) - expected_events.append(('DeviceEvent', EVENT_REMOVE, json_device_id(device_uuid))) # ----- Delete Topologies and Validate Collected Events 
------------------------------------------------------------ for topology in TOPOLOGIES: @@ -93,8 +68,6 @@ def test_scenario_cleanup( topology_uuid = topology_id['topology_uuid']['uuid'] LOGGER.info('Deleting Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) context_client.RemoveTopology(TopologyId(**topology_id)) - context_id = json_context_id(context_uuid) - expected_events.append(('TopologyEvent', EVENT_REMOVE, json_topology_id(topology_uuid, context_id=context_id))) # ----- Delete Contexts and Validate Collected Events -------------------------------------------------------------- for context in CONTEXTS: @@ -102,13 +75,6 @@ def test_scenario_cleanup( context_uuid = context_id['context_uuid']['uuid'] LOGGER.info('Deleting Context {:s}'.format(context_uuid)) context_client.RemoveContext(ContextId(**context_id)) - expected_events.append(('ContextEvent', EVENT_REMOVE, json_context_id(context_uuid))) - - # ----- Validate Collected Events ---------------------------------------------------------------------------------- - check_events(events_collector, expected_events) - - # ----- Stop the EventsCollector ----------------------------------------------------------------------------------- - events_collector.stop() def test_scenario_empty_again(context_client : ContextClient): # pylint: disable=redefined-outer-name diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py index 2928d9b35..ed914bb69 100644 --- a/src/tests/ecoc22/tests/test_functional_create_service.py +++ b/src/tests/ecoc22/tests/test_functional_create_service.py @@ -12,34 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging, pytest -from common.Settings import get_setting +import logging from common.proto.context_pb2 import ContextId, Empty from common.tools.grpc.Tools import grpc_message_to_json_string from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient -from .Objects import ( - CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, WIM_PASSWORD, WIM_SERVICE_CONNECTION_POINTS, - WIM_SERVICE_TYPE, WIM_USERNAME) +from .Fixtures import context_client, osm_wim +from .Objects_DC_CSGW_TN import ( + CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -@pytest.fixture(scope='session') -def context_client(): - _client = ContextClient() - yield _client - _client.close() - - -@pytest.fixture(scope='session') -def osm_wim(): - wim_url = 'http://{:s}:{:s}'.format( - get_setting('COMPUTESERVICE_SERVICE_HOST'), str(get_setting('COMPUTESERVICE_SERVICE_PORT_HTTP'))) - return MockOSM(wim_url, WIM_MAPPING, WIM_USERNAME, WIM_PASSWORD) - - def test_scenario_is_correct(context_client : ContextClient): # pylint: disable=redefined-outer-name # ----- List entities - Ensure links are created ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py index 89fe22b1f..d8a943db3 100644 --- a/src/tests/ecoc22/tests/test_functional_delete_service.py +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -24,7 +24,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from context.client.EventsCollector import EventsCollector -from .Objects import ( +from .Objects_OldBigNet import ( CONTEXT_ID, CONTEXTS, 
DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME) -- GitLab From cb106875d74422b7dc74cc5028defc3266ac271f Mon Sep 17 00:00:00 2001 From: Lucie Long Date: Fri, 9 Sep 2022 14:35:09 +0000 Subject: [PATCH 22/91] WebUI component: - corrected constraint display in service list page - added list of connections related to service in detail page - added detail of links - improved look and feel of device detail page - added feature of loading services through JSON file --- src/webui/service/link/routes.py | 17 +- src/webui/service/main/routes.py | 90 ++++--- src/webui/service/service/routes.py | 7 +- .../service/templates/device/detail.html | 222 ++++++++++-------- src/webui/service/templates/link/detail.html | 59 +++++ src/webui/service/templates/link/home.html | 190 +++++++-------- .../service/templates/service/detail.html | 37 ++- src/webui/service/templates/service/home.html | 2 +- 8 files changed, 387 insertions(+), 237 deletions(-) create mode 100644 src/webui/service/templates/link/detail.html diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 04c4b1de5..51e903d9e 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from flask import render_template, Blueprint, flash, session, redirect, url_for -from common.proto.context_pb2 import Empty, LinkList + +from flask import current_app, render_template, Blueprint, flash, session, redirect, url_for +from common.proto.context_pb2 import Empty, Link, LinkEvent, LinkId, LinkIdList, LinkList, DeviceId from context.client.ContextClient import ContextClient + link = Blueprint('link', __name__, url_prefix='/link') context_client = ContextClient() @@ -32,4 +34,13 @@ def home(): return render_template( "link/home.html", links=response.links, - ) \ No newline at end of file + ) + +@link.route('detail/', methods=('GET', 'POST')) +def detail(link_uuid: str): + request = LinkId() + request.link_uuid.uuid = link_uuid + context_client.connect() + response = context_client.GetLink(request) + context_client.close() + return render_template('link/detail.html',link=response) diff --git a/src/webui/service/main/routes.py b/src/webui/service/main/routes.py index 85d3aeeb7..893f08543 100644 --- a/src/webui/service/main/routes.py +++ b/src/webui/service/main/routes.py @@ -11,33 +11,45 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -import json, logging +import copy, json, logging from flask import jsonify, redirect, render_template, Blueprint, flash, session, url_for, request -from common.proto.context_pb2 import Context, Device, Empty, Link, Topology, ContextIdList +from common.proto.context_pb2 import Context, Device, Empty, Link, Service, Topology, ContextIdList +from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient from webui.service.main.forms import ContextForm, DescriptorForm - main = Blueprint('main', __name__) - context_client = ContextClient() device_client = DeviceClient() - +service_client = ServiceClient() logger = logging.getLogger(__name__) - -def process_descriptor(item_name_singluar, item_name_plural, grpc_method, grpc_class, items): +ENTITY_TO_TEXT = { + # name => singular, plural + 'context' : ('Context', 'Contexts' ), + 'topology': ('Topology', 'Topologies'), + 'device' : ('Device', 'Devices' ), + 'link' : ('Link', 'Links' ), + 'service' : ('Service', 'Services' ), +} +ACTION_TO_TEXT = { + # action => infinitive, past + 'add' : ('Add', 'Added'), + 'update' : ('Update', 'Updated'), +} +def process_descriptor(entity_name, action_name, grpc_method, grpc_class, entities): + entity_name_singluar,entity_name_plural = ENTITY_TO_TEXT[entity_name] + action_infinitive, action_past = ACTION_TO_TEXT[action_name] num_ok, num_err = 0, 0 - for item in items: + for entity in entities: try: - grpc_method(grpc_class(**item)) + grpc_method(grpc_class(**entity)) num_ok += 1 except Exception as e: # pylint: disable=broad-except - flash(f'Unable to add {item_name_singluar} {str(item)}: {str(e)}', 'error') + flash(f'Unable to {action_infinitive} {entity_name_singluar} {str(entity)}: {str(e)}', 'error') num_err += 1 - if num_ok : flash(f'{str(num_ok)} {item_name_plural} added', 'success') - if num_err: 
flash(f'{str(num_err)} {item_name_plural} failed', 'danger') - + if num_ok : flash(f'{str(num_ok)} {entity_name_plural} {action_past}', 'success') + if num_err: flash(f'{str(num_err)} {entity_name_plural} failed', 'danger') def process_descriptors(descriptors): logger.warning(str(descriptors.data)) logger.warning(str(descriptors.name)) @@ -52,16 +64,30 @@ def process_descriptors(descriptors): except Exception as e: # pylint: disable=broad-except flash(f'Unable to load descriptor file: {str(e)}', 'danger') return - + contexts = descriptors.get('contexts' , []) + topologies = descriptors.get('topologies', []) + devices = descriptors.get('devices' , []) + links = descriptors.get('links' , []) + services = descriptors.get('services' , []) + services_add = [] + for service in services: + service_copy = copy.deepcopy(service) + service_copy['service_endpoint_ids'] = [] + service_copy['service_constraints'] = [] + service_copy['service_config'] = {'config_rules': []} + services_add.append(service_copy) context_client.connect() device_client.connect() - process_descriptor('Context', 'Contexts', context_client.SetContext, Context, descriptors['contexts' ]) - process_descriptor('Topology', 'Topologies', context_client.SetTopology, Topology, descriptors['topologies']) - process_descriptor('Device', 'Devices', device_client .AddDevice, Device, descriptors['devices' ]) - process_descriptor('Link', 'Links', context_client.SetLink, Link, descriptors['links' ]) + service_client.connect() + process_descriptor('context', 'add', context_client.SetContext, Context, contexts ) + process_descriptor('topology', 'add', context_client.SetTopology, Topology, topologies ) + process_descriptor('device', 'add', device_client .AddDevice, Device, devices ) + process_descriptor('link', 'add', context_client.SetLink, Link, links ) + process_descriptor('service', 'add', service_client.CreateService, Service, services_add) + process_descriptor('service', 'update', service_client.UpdateService, 
Service, services ) + service_client.close() device_client.close() context_client.close() - @main.route('/', methods=['GET', 'POST']) def home(): context_client.connect() @@ -89,7 +115,6 @@ def home(): context_client.close() device_client.close() return render_template('main/home.html', context_form=context_form, descriptor_form=descriptor_form) - @main.route('/topology', methods=['GET']) def topology(): context_client.connect() @@ -100,29 +125,30 @@ def topology(): 'name': device.device_id.device_uuid.uuid, 'type': device.device_type, } for device in response.devices] - response = context_client.ListLinks(Empty()) - links = [{ - 'id': link.link_id.link_uuid.uuid, - 'source': link.link_endpoint_ids[0].device_id.device_uuid.uuid, - 'target': link.link_endpoint_ids[1].device_id.device_uuid.uuid, - } for link in response.links] - + links = [] + for link in response.links: + if len(link.link_endpoint_ids) != 2: + str_link = grpc_message_to_json_string(link) + logger.warning('Unexpected link with len(endpoints) != 2: {:s}'.format(str_link)) + continue + links.append({ + 'id': link.link_id.link_uuid.uuid, + 'source': link.link_endpoint_ids[0].device_id.device_uuid.uuid, + 'target': link.link_endpoint_ids[1].device_id.device_uuid.uuid, + }) return jsonify({'devices': devices, 'links': links}) except: logger.exception('Error retrieving topology') finally: context_client.close() - @main.get('/about') def about(): return render_template('main/about.html') - @main.get('/debug') def debug(): return render_template('main/debug.html') - @main.get('/resetsession') def reset_session(): session.clear() - return redirect(url_for("main.home")) + return redirect(url_for("main.home")) \ No newline at end of file diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py index 81031490e..d62e28ca1 100644 --- a/src/webui/service/service/routes.py +++ b/src/webui/service/service/routes.py @@ -14,7 +14,7 @@ import grpc from flask import current_app, redirect, 
render_template, Blueprint, flash, session, url_for -from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceList, ServiceTypeEnum, ServiceStatusEnum +from common.proto.context_pb2 import ContextId, Service, ServiceId, ServiceList, ServiceTypeEnum, ServiceStatusEnum, Connection from context.client.ContextClient import ContextClient from service.client.ServiceClient import ServiceClient @@ -73,12 +73,13 @@ def detail(service_uuid: str): try: context_client.connect() response: Service = context_client.GetService(request) + connections: Connection = context_client.ListConnections(request) context_client.close() except Exception as e: flash('The system encountered an error and cannot show the details of this service.', 'warning') current_app.logger.exception(e) return redirect(url_for('service.home')) - return render_template('service/detail.html', service=response) + return render_template('service/detail.html', service=response, connections=connections) @service.get('/delete') @@ -100,4 +101,4 @@ def delete(service_uuid: str): except Exception as e: flash('Problem deleting service "{:s}": {:s}'.format(service_uuid, str(e.details())), 'danger') current_app.logger.exception(e) - return redirect(url_for('service.home')) + return redirect(url_for('service.home')) \ No newline at end of file diff --git a/src/webui/service/templates/device/detail.html b/src/webui/service/templates/device/detail.html index b4cf6b715..f2cdc5815 100644 --- a/src/webui/service/templates/device/detail.html +++ b/src/webui/service/templates/device/detail.html @@ -1,111 +1,129 @@ + + {% extends 'base.html' %} + + {% block content %} +

Device {{ device.device_id.device_uuid.uuid }}

+ +
+
+ +
+ +
+ + +
+
- Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. ---> - -{% extends 'base.html' %} - -{% block content %} -

Device {{ device.device_id.device_uuid.uuid }}

- -
-
- +
+
+
+ UUID: {{ device.device_id.device_uuid.uuid }}

+ Type: {{ device.device_type }}

+ Drivers: +
    + {% for driver in device.device_drivers %} +
  • {{ dde.Name(driver).replace('DEVICEDRIVER_', '').replace('UNDEFINED', 'EMULATED') }}
  • + {% endfor %} +
+
+
+ + + + + + + + + {% for endpoint in device.device_endpoints %} + + + + + {% endfor %} + +
EndpointsType
+ {{ endpoint.endpoint_id.endpoint_uuid.uuid }} + + {{ endpoint.endpoint_type }} +
+
+
- -
- - -
-
-
-
UUID:
-
- {{ device.device_id.device_uuid.uuid }} -
-
Type:
-
- {{ device.device_type }} -
-
-
-
Drivers:
-
-
    - {% for driver in device.device_drivers %} -
  • {{ dde.Name(driver).replace('DEVICEDRIVER_', '').replace('UNDEFINED', 'EMULATED') }}
  • - {% endfor %} -
-
-
-
- Endpoints: -
-
    - {% for endpoint in device.device_endpoints %} -
  • {{ endpoint.endpoint_id.endpoint_uuid.uuid }}: {{ endpoint.endpoint_type }}
  • - {% endfor %} -
-
-
-
Configurations: -
-
    - {% for config in device.device_config.config_rules %} + + + + + + + + + {% for config in device.device_config.config_rules %} {% if config.WhichOneof('config_rule') == 'custom' %} -
  • {{ config.custom.resource_key }}: -
      - {% for key, value in (config.custom.resource_value | from_json).items() %} -
    • {{ key }}: {{ value }}
    • - {% endfor %} -
    -
  • + + + + {% endif %} - {% endfor %} - - - + {% endfor %} + +
    KeyValue
    + {{ config.custom.resource_key }} + +
      + {% for key, value in (config.custom.resource_value | from_json).items() %} +
    • {{ key }}: {{ value }}
    • + {% endfor %} +
    +
    - - -{% endblock %} \ No newline at end of file + + + + {% endblock %} + \ No newline at end of file diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html new file mode 100644 index 000000000..8b49a65eb --- /dev/null +++ b/src/webui/service/templates/link/detail.html @@ -0,0 +1,59 @@ + + {% extends 'base.html' %} + + {% block content %} +

    Link {{ link.link_id.link_uuid.uuid }}

    +
    +
    + +
    +
    + +
    +
    +
    + UUID: {{ link.link_id.link_uuid.uuid }}

    +
    +
    + + + + + + + + + {% for end_point in link.link_endpoint_ids %} + + + + + {% endfor %} + +
    EndpointsType
    + {{ end_point.endpoint_uuid.uuid }} + + {{ end_point.endpoint_uuid.uuid }} +
    +
    +
    + + {% endblock %} + \ No newline at end of file diff --git a/src/webui/service/templates/link/home.html b/src/webui/service/templates/link/home.html index d0c122f6a..77d00d341 100644 --- a/src/webui/service/templates/link/home.html +++ b/src/webui/service/templates/link/home.html @@ -1,96 +1,96 @@ - -{% extends 'base.html' %} - -{% block content %} -

    Links

    - -
    -
    - -
    -
    - {{ links | length }} links found -
    - -
    - - - - - - - - - - - {% if links %} - {% for link in links %} - - - - - - - - {% endfor %} - {% else %} - - - - {% endif %} - -
    #Endpoints
    - - {{ link.link_id.link_uuid.uuid }} - - - - - -
    No links found
    - -{% endblock %} \ No newline at end of file + Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + --> + + {% extends 'base.html' %} + + {% block content %} +

    Links

    + +
    +
    + +
    +
    + {{ links | length }} links found +
    + +
    + + + + + + + + + + + {% if links %} + {% for link in links %} + + + + + + + + {% endfor %} + {% else %} + + + + {% endif %} + +
    #Endpoints
    + + {{ link.link_id.link_uuid.uuid }} + + + + + + + + + + +
    No links found
    + + {% endblock %} \ No newline at end of file diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index 1e58b9eaa..3a0f0f7d0 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -98,4 +98,39 @@
-{% endblock %} \ No newline at end of file + + + + + + + + + + + {% for connections in connections.connections %} + + + + + {% for i in range(connections.path_hops_endpoint_ids|length) %} + +{% endfor %} + + + + {% endfor %} + +
Connection IdSub-servicePath
+ {{ connections.connection_id.connection_uuid.uuid }} + + {{ connections.sub_service_ids|map(attribute='service_uuid')|map(attribute='uuid')|join(', ') }} + + {{ connections.path_hops_endpoint_ids[i].device_id.device_uuid.uuid }} / {{ connections.path_hops_endpoint_ids[i].endpoint_uuid.uuid }} +
+ + + + + +{% endblock %} diff --git a/src/webui/service/templates/service/home.html b/src/webui/service/templates/service/home.html index 0e152006c..8d231cf17 100644 --- a/src/webui/service/templates/service/home.html +++ b/src/webui/service/templates/service/home.html @@ -73,7 +73,7 @@
    {% for constraint in service.service_constraints %} -
  • {{ constraint.constraint_type }}: {{ constraint.constraint_value }}
  • +
  • {{ constraint.custom.constraint_type }}: {{ constraint.custom.constraint_value }}
  • {% endfor %}
-- GitLab From b4c93fd72f13982305900db42b6e337d308f1de8 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Sun, 11 Sep 2022 14:50:27 +0000 Subject: [PATCH 23/91] Compute component: - Updated endpoint mapping for ECOC'22 scenario - Swapped get-service and get-slice methods to priorize slices over services --- .../nbi_plugins/ietf_l2vpn/Constants.py | 8 ++-- .../nbi_plugins/ietf_l2vpn/L2VPN_Service.py | 40 ++++++++++--------- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 2 +- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py index dae9a7041..41d58caa4 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py @@ -56,8 +56,8 @@ BEARER_MAPPINGS = { #'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24), # ECOC'22 - 'CE1-PE1': ('PE1', '1/1', '10.0.0.101', '65000:101', 300, None, None), - 'CE2-PE2': ('PE2', '1/1', '10.0.0.102', '65000:102', 300, None, None), - 'CE3-PE3': ('PE3', '1/1', '10.0.0.103', '65000:103', 300, None, None), - 'CE4-PE4': ('PE4', '1/1', '10.0.0.104', '65000:104', 300, None, None), + 'DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', '10.0.1.101', '65000:101', 300, None, None), + 'DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', '10.0.2.101', '65000:102', 300, None, None), + 'DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', '10.0.1.102', '65000:103', 300, None, None), + 'DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', '10.0.2.102', '65000:104', 300, None, None), } diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index ed2f01af1..9e4527f80 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -36,24 +36,26 @@ class 
L2VPN_Service(Resource): try: context_client = ContextClient() - target = get_service(context_client, vpn_id) - if target is not None: - if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member - raise Exception('Service retrieval failed. Wrong Service Id was returned') - service_ready_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE - service_status = target.service_status.service_status # pylint: disable=no-member - response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT - return response - target = get_slice(context_client, vpn_id) if target is not None: if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member raise Exception('Slice retrieval failed. Wrong Slice Id was returned') slice_ready_status = SliceStatusEnum.SLICESTATUS_ACTIVE slice_status = target.slice_status.slice_status # pylint: disable=no-member + response = jsonify({}) response.status_code = HTTP_OK if slice_status == slice_ready_status else HTTP_GATEWAYTIMEOUT return response + target = get_service(context_client, vpn_id) + if target is not None: + if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member + raise Exception('Service retrieval failed. 
Wrong Service Id was returned') + service_ready_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE + service_status = target.service_status.service_status # pylint: disable=no-member + response = jsonify({}) + response.status_code = HTTP_OK if service_status == service_ready_status else HTTP_GATEWAYTIMEOUT + return response + raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Something went wrong Retrieving VPN({:s})'.format(str(vpn_id))) @@ -69,16 +71,6 @@ class L2VPN_Service(Resource): try: context_client = ContextClient() - target = get_service(context_client, vpn_id) - if target is not None: - if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member - raise Exception('Service retrieval failed. Wrong Service Id was returned') - service_client = ServiceClient() - service_client.DeleteService(target.service_id) - response = jsonify({}) - response.status_code = HTTP_NOCONTENT - return response - target = get_slice(context_client, vpn_id) if target is not None: if target.slice_id.slice_uuid.uuid != vpn_id: # pylint: disable=no-member @@ -89,6 +81,16 @@ class L2VPN_Service(Resource): response.status_code = HTTP_NOCONTENT return response + target = get_service(context_client, vpn_id) + if target is not None: + if target.service_id.service_uuid.uuid != vpn_id: # pylint: disable=no-member + raise Exception('Service retrieval failed. 
Wrong Service Id was returned') + service_client = ServiceClient() + service_client.DeleteService(target.service_id) + response = jsonify({}) + response.status_code = HTTP_NOCONTENT + return response + raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Something went wrong Deleting VPN({:s})'.format(str(vpn_id))) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 401909940..8496b9f56 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -68,8 +68,8 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s device_uuid,endpoint_uuid,router_id,route_distinguisher,sub_if_index,address_ip,address_prefix = mapping target : Union[Service, Slice, None] = None - if target is None: target = get_service(context_client, vpn_id) if target is None: target = get_slice (context_client, vpn_id) + if target is None: target = get_service(context_client, vpn_id) if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) if isinstance(target, Service): -- GitLab From 442ff9a6cb328023cb2f2ec5149e833d99d9543f Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Sun, 11 Sep 2022 14:54:41 +0000 Subject: [PATCH 24/91] ECOC'22 test: - Improved formatting of scenario DC-CSGW-TN - Created clone of scenario DC-CSGW-TN including OLS - Cleaned up bootstrap and delete service unitary tests - Renamed device type OPTICAL_LINE_SYSTEM to OPEN_LINE_SYSTEM --- src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py | 64 +++-- .../ecoc22/tests/Objects_DC_CSGW_TN_OLS.py | 232 ++++++++++++++++++ .../ecoc22/tests/test_functional_bootstrap.py | 99 +++----- .../tests/test_functional_delete_service.py | 2 +- 4 
files changed, 294 insertions(+), 103 deletions(-) create mode 100644 src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py index 9422906d3..642b12b58 100644 --- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py @@ -12,12 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID -#from common.tools.object_factory.Constraint import json_constraint from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( - json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, json_device_emulated_packet_router_disabled, json_device_id) + json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, + json_device_emulated_packet_router_disabled, json_device_id) from common.tools.object_factory.EndPoint import json_endpoints from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned @@ -91,20 +90,20 @@ TOPO_TN = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID) # ----- Devices -------------------------------------------------------------------------------------------------------- # DataCenters -DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int'], topology_id=TOPO_DC1_ID) -DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int'], topology_id=TOPO_DC2_ID) +DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int']) +DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int']) # CellSites -DEV_CS1GW1_ID, 
DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS1_ID) -DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS1_ID) -DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS2_ID) -DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1', '1/2'], topology_id=TOPO_CS2_ID) +DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1', '1/2']) +DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1', '1/2']) +DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1', '1/2']) +DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1', '1/2']) # Transport Network -DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_router('TN-R1', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) -DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_router('TN-R2', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) -DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_router('TN-R3', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) -DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_router('TN-R4', ['1/1', '1/2', '2/1', '2/2', '2/3'], topology_id=TOPO_TN_ID) +DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_router('TN-R1', ['1/1', '1/2', '2/1', '2/2', '2/3']) +DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_router('TN-R2', ['1/1', '1/2', '2/1', '2/2', '2/3']) +DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_router('TN-R3', ['1/1', '1/2', '2/1', '2/2', '2/3']) +DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_router('TN-R4', ['1/1', '1/2', '2/1', '2/2', '2/3']) # ----- Links ---------------------------------------------------------------------------------------------------------- @@ -133,12 +132,6 @@ LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4 
LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4]) -# ----- Service -------------------------------------------------------------------------------------------------------- -#SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[ -# json_constraint('bandwidth[gbps]', 10.0), -# json_constraint('latency[ms]', 12.0), -#]) - # ----- WIM Service Settings ------------------------------------------------------------------------------------------- WIM_USERNAME = 'admin' WIM_PASSWORD = 'admin' @@ -148,8 +141,8 @@ def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]): ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] pe_device_uuid = pe_device_id['device_uuid']['uuid'] - service_endpoint_id = '{:s}-{:s}-{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid) - bearer = '{:s}-{:s}'.format(ce_device_uuid, pe_device_uuid) + service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid) + bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid) _mapping = { 'service_endpoint_id': service_endpoint_id, 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, @@ -162,10 +155,10 @@ def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]): if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant return service_endpoint_id, _mapping -WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1-DC1GW-eth2']) -WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1-DC1GW-eth1']) -WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2-DC2GW-eth2']) -WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, 
redundant=['DC2-DC2GW-eth1']) +WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2']) +WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1']) +WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2']) +WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1']) WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC] @@ -186,23 +179,26 @@ CONTEXTS = [ CONTEXT ] TOPOLOGIES = [ TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ] DEVICES = [ DEV_DC1GW, DEV_DC2GW, DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2, - DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4 ] + DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4, + ] LINKS = [ LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2, LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1, LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3, - LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4 ] -#SERVICES = [ SERVICE_DC1GW_DC2GW ] + LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4, + ] OBJECTS_PER_TOPOLOGY = [ (TOPO_ADMIN_ID, [ DEV_DC1GW_ID, DEV_DC2GW_ID, DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID, - DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID ], + DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID, + ], [ LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID, LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID, LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID, LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID, - 
LINK_TNR2_TNR4_ID ], + LINK_TNR2_TNR4_ID, + ], ), (TOPO_DC1_ID, [DEV_DC1GW_ID], @@ -217,7 +213,9 @@ OBJECTS_PER_TOPOLOGY = [ [DEV_CS2GW1_ID, DEV_CS2GW2_ID], []), (TOPO_TN_ID, - [DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID], - [LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID, - LINK_TNR2_TNR4_ID]), + [ DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID, + ], + [ LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID, + LINK_TNR2_TNR4_ID, + ]), ] diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py new file mode 100644 index 000000000..5b1d01c30 --- /dev/null +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py @@ -0,0 +1,232 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid +from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID +from common.tools.object_factory.Context import json_context, json_context_id +from common.tools.object_factory.Device import ( + json_device_emulated_connect_rules, json_device_emulated_datacenter_disabled, + json_device_emulated_packet_router_disabled, json_device_emulated_tapi_disabled, json_device_id) +from common.tools.object_factory.EndPoint import json_endpoints +from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id +from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned +from common.tools.object_factory.Topology import json_topology, json_topology_id + +def compose_router(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): + device_id = json_device_id(device_uuid) + r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] + config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) + j_endpoints = [] if with_connect_rules else endpoints + device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) + return device_id, endpoints, device + +def compose_ols(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): + device_id = json_device_id(device_uuid) + r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids] + config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) + j_endpoints = [] if with_connect_rules else endpoints + device = json_device_emulated_tapi_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) + return device_id, endpoints, device + +def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None, 
with_connect_rules=True): + device_id = json_device_id(device_uuid) + r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] + config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) + j_endpoints = [] if with_connect_rules else endpoints + device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) + return device_id, endpoints, device + +def compose_link(endpoint_a, endpoint_z): + link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id']) + link_id = json_link_id(link_uuid) + link = json_link(link_uuid, [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]) + return link_id, link + +def compose_service(endpoint_a, endpoint_z, constraints=[]): + service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id']) + endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']] + service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints) + return service + +# ----- Context -------------------------------------------------------------------------------------------------------- +CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) +CONTEXT = json_context(DEFAULT_CONTEXT_UUID) + +# ----- Domains -------------------------------------------------------------------------------------------------------- +# Overall network topology +TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID +TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) +TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID) + +# DataCenter #1 Network +TOPO_DC1_UUID = 'DC1' +TOPO_DC1_ID = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID) +TOPO_DC1 = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID) + +# DataCenter #2 Network +TOPO_DC2_UUID = 'DC2' +TOPO_DC2_ID = json_topology_id(TOPO_DC2_UUID, 
context_id=CONTEXT_ID) +TOPO_DC2 = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID) + +# CellSite #1 Network +TOPO_CS1_UUID = 'CS1' +TOPO_CS1_ID = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID) +TOPO_CS1 = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID) + +# CellSite #2 Network +TOPO_CS2_UUID = 'CS2' +TOPO_CS2_ID = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID) +TOPO_CS2 = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID) + +# Transport Network Network +TOPO_TN_UUID = 'TN' +TOPO_TN_ID = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID) +TOPO_TN = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID) + + +# ----- Devices -------------------------------------------------------------------------------------------------------- +# DataCenters +DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_datacenter('DC1-GW', ['eth1', 'eth2', 'int']) +DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_datacenter('DC2-GW', ['eth1', 'eth2', 'int']) + +# CellSites +DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_router('CS1-GW1', ['10/1', '1/1', '1/2']) +DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_router('CS1-GW2', ['10/1', '1/1', '1/2']) +DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_router('CS2-GW1', ['10/1', '1/1', '1/2']) +DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_router('CS2-GW2', ['10/1', '1/1', '1/2']) + +# Transport Network +DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_router('TN-R1', ['1/1', '1/2', '2/1']) +DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_router('TN-R2', ['1/1', '1/2', '2/1']) +DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_router('TN-R3', ['1/1', '1/2', '2/1']) +DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_router('TN-R4', ['1/1', '1/2', '2/1']) +tols_ep_uuids = [str(uuid.uuid4()).split('-')[-1] for _ in range(4)] +DEV_TOLS_ID, DEV_TOLS_EPS, DEV_TOLS = compose_ols('TN-OLS', tols_ep_uuids) + + +# ----- Links ---------------------------------------------------------------------------------------------------------- 
+# InterDomain DC-CSGW +LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0]) +LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0]) +LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0]) +LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0]) + +# InterDomain CSGW-TN +LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0]) +LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0]) +LINK_CS1GW1_TNR2_ID, LINK_CS1GW1_TNR2 = compose_link(DEV_CS1GW1_EPS[2], DEV_TNR2_EPS[1]) +LINK_CS1GW2_TNR1_ID, LINK_CS1GW2_TNR1 = compose_link(DEV_CS1GW2_EPS[2], DEV_TNR1_EPS[1]) +LINK_CS2GW1_TNR3_ID, LINK_CS2GW1_TNR3 = compose_link(DEV_CS2GW1_EPS[1], DEV_TNR3_EPS[0]) +LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4_EPS[0]) +LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1]) +LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1]) + +# IntraDomain TN +LINK_TNR1_TOLS_ID, LINK_TNR1_TOLS = compose_link(DEV_TNR1_EPS[2], DEV_TOLS_EPS[0]) +LINK_TNR2_TOLS_ID, LINK_TNR2_TOLS = compose_link(DEV_TNR2_EPS[2], DEV_TOLS_EPS[1]) +LINK_TNR3_TOLS_ID, LINK_TNR3_TOLS = compose_link(DEV_TNR3_EPS[2], DEV_TOLS_EPS[2]) +LINK_TNR4_TOLS_ID, LINK_TNR4_TOLS = compose_link(DEV_TNR4_EPS[2], DEV_TOLS_EPS[3]) + + +# ----- WIM Service Settings ------------------------------------------------------------------------------------------- +WIM_USERNAME = 'admin' +WIM_PASSWORD = 'admin' + +def mapping(site_id, ce_endpoint_id, pe_device_id, priority=None, redundant=[]): + ce_endpoint_id = ce_endpoint_id['endpoint_id'] + ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] + ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] + pe_device_uuid = pe_device_id['device_uuid']['uuid'] + 
service_endpoint_id = '{:s}:{:s}:{:s}'.format(site_id, ce_device_uuid, ce_endpoint_uuid) + bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid) + _mapping = { + 'service_endpoint_id': service_endpoint_id, + 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, + 'service_mapping_info': { + 'site-id': site_id, + 'bearer': {'bearer-reference': bearer}, + } + } + if priority is not None: _mapping['service_mapping_info']['priority'] = priority + if len(redundant) > 0: _mapping['service_mapping_info']['redundant'] = redundant + return service_endpoint_id, _mapping + +WIM_SEP_DC1_PRI, WIM_MAP_DC1_PRI = mapping('DC1', DEV_DC1GW_EPS[0], DEV_CS1GW1_ID, priority=10, redundant=['DC1:DC1-GW:eth2']) +WIM_SEP_DC1_SEC, WIM_MAP_DC1_SEC = mapping('DC1', DEV_DC1GW_EPS[1], DEV_CS1GW2_ID, priority=20, redundant=['DC1:DC1-GW:eth1']) +WIM_SEP_DC2_PRI, WIM_MAP_DC2_PRI = mapping('DC2', DEV_DC2GW_EPS[0], DEV_CS2GW1_ID, priority=10, redundant=['DC2:DC2-GW:eth2']) +WIM_SEP_DC2_SEC, WIM_MAP_DC2_SEC = mapping('DC2', DEV_DC2GW_EPS[1], DEV_CS2GW2_ID, priority=20, redundant=['DC2:DC2-GW:eth1']) + +WIM_MAPPING = [WIM_MAP_DC1_PRI, WIM_MAP_DC1_SEC, WIM_MAP_DC2_PRI, WIM_MAP_DC2_SEC] + +WIM_SRV_VLAN_ID = 300 +WIM_SERVICE_TYPE = 'ELAN' +WIM_SERVICE_CONNECTION_POINTS = [ + {'service_endpoint_id': WIM_SEP_DC1_PRI, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}}, + {'service_endpoint_id': WIM_SEP_DC2_PRI, + 'service_endpoint_encapsulation_type': 'dot1q', + 'service_endpoint_encapsulation_info': {'vlan': WIM_SRV_VLAN_ID}}, +] + + +# ----- Containers ----------------------------------------------------------------------------------------------------- +CONTEXTS = [ CONTEXT ] +TOPOLOGIES = [ TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN ] +DEVICES = [ DEV_DC1GW, DEV_DC2GW, + DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2, + DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4, + DEV_TOLS, + ] 
+LINKS = [ LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2, + LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1, + LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3, + LINK_TNR1_TOLS, LINK_TNR2_TOLS, LINK_TNR3_TOLS, LINK_TNR4_TOLS, + ] + +OBJECTS_PER_TOPOLOGY = [ + (TOPO_ADMIN_ID, + [ DEV_DC1GW_ID, DEV_DC2GW_ID, + DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID, + DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID, + DEV_TOLS_ID, + ], + [ LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID, + LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID, + LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID, + LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID, + ], + ), + (TOPO_DC1_ID, + [DEV_DC1GW_ID], + []), + (TOPO_DC2_ID, + [DEV_DC2GW_ID], + []), + (TOPO_CS1_ID, + [DEV_CS1GW1_ID, DEV_CS1GW2_ID], + []), + (TOPO_CS2_ID, + [DEV_CS2GW1_ID, DEV_CS2GW2_ID], + []), + (TOPO_TN_ID, + [ DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID, + DEV_TOLS_ID, + ], + [ LINK_TNR1_TOLS_ID, LINK_TNR2_TOLS_ID, LINK_TNR3_TOLS_ID, LINK_TNR4_TOLS_ID, + ]), +] diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py index d4e05f53d..2e5e0daf8 100644 --- a/src/tests/ecoc22/tests/test_functional_bootstrap.py +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -12,13 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import copy, logging -from common.proto.context_pb2 import Context, ContextId, Device, Empty, Link, Topology +import logging +from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty, Link, LinkId, Topology, TopologyId from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from .Fixtures import context_client, device_client #from .Objects_OldBigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES -from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, TOPOLOGIES LOGGER = logging.getLogger(__name__) @@ -37,83 +37,44 @@ def test_scenario_empty(context_client : ContextClient): # pylint: disable=rede assert len(response.links) == 0 -def test_prepare_scenario(context_client : ContextClient): # pylint: disable=redefined-outer-name +def test_prepare_environment( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient): # pylint: disable=redefined-outer-name - # ----- Create Contexts and Topologies ----------------------------------------------------------------------------- - for context in CONTEXTS: - context_uuid = context['context_id']['context_uuid']['uuid'] - LOGGER.info('Adding Context {:s}'.format(context_uuid)) - response = context_client.SetContext(Context(**context)) - assert response.context_uuid.uuid == context_uuid + for context in CONTEXTS : context_client.SetContext (Context (**context )) + for topology in TOPOLOGIES: context_client.SetTopology(Topology(**topology)) + for device in DEVICES : device_client .AddDevice (Device (**device )) - for topology in TOPOLOGIES: - context_uuid = topology['topology_id']['context_id']['context_uuid']['uuid'] - topology_uuid = topology['topology_id']['topology_uuid']['uuid'] - LOGGER.info('Adding Topology {:s}/{:s}'.format(context_uuid, topology_uuid)) - response = 
context_client.SetTopology(Topology(**topology)) - assert response.context_id.context_uuid.uuid == context_uuid - assert response.topology_uuid.uuid == topology_uuid + for topology_id, device_ids, _ in OBJECTS_PER_TOPOLOGY: + topology = Topology() + topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id))) + device_ids_in_topology = {device_id.device_uuid.uuid for device_id in topology.device_ids} + func_device_id_not_added = lambda device_id: device_id['device_uuid']['uuid'] not in device_ids_in_topology + func_device_id_json_to_grpc = lambda device_id: DeviceId(**device_id) + device_ids_to_add = list(map(func_device_id_json_to_grpc, filter(func_device_id_not_added, device_ids))) + topology.device_ids.extend(device_ids_to_add) -def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) - - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == 0 + context_client.SetTopology(topology) - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 + for link in LINKS : context_client.SetLink (Link (**link )) + for topology_id, _, link_ids in OBJECTS_PER_TOPOLOGY: + topology = Topology() + topology.CopyFrom(context_client.GetTopology(TopologyId(**topology_id))) -def test_devices_bootstraping( - context_client : ContextClient, device_client : DeviceClient): # pylint: disable=redefined-outer-name + link_ids_in_topology = {link_id.link_uuid.uuid for link_id in topology.link_ids} + func_link_id_not_added = lambda link_id: 
link_id['link_uuid']['uuid'] not in link_ids_in_topology + func_link_id_json_to_grpc = lambda link_id: LinkId(**link_id) + link_ids_to_add = list(map(func_link_id_json_to_grpc, filter(func_link_id_not_added, link_ids))) + topology.link_ids.extend(link_ids_to_add) - # ----- Create Devices and Validate Collected Events --------------------------------------------------------------- - for device in DEVICES: - device_uuid = device['device_id']['device_uuid']['uuid'] - LOGGER.info('Adding Device {:s}'.format(device_uuid)) - response = device_client.AddDevice(Device(**device)) - assert response.device_uuid.uuid == device_uuid + context_client.SetTopology(topology) -def test_devices_bootstrapped(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure bevices are created ----------------------------------------------------------------- - response = context_client.ListContexts(Empty()) - assert len(response.contexts) == len(CONTEXTS) - - response = context_client.ListTopologies(ContextId(**CONTEXT_ID)) - assert len(response.topologies) == len(TOPOLOGIES) - - response = context_client.ListDevices(Empty()) - assert len(response.devices) == len(DEVICES) - - response = context_client.ListLinks(Empty()) - assert len(response.links) == 0 - - response = context_client.ListServices(ContextId(**CONTEXT_ID)) - assert len(response.services) == 0 - - -def test_links_creation(context_client : ContextClient): # pylint: disable=redefined-outer-name - - # ----- Create Links and Validate Collected Events ----------------------------------------------------------------- - for link in LINKS: - link_uuid = link['link_id']['link_uuid']['uuid'] - LOGGER.info('Adding Link {:s}'.format(link_uuid)) - response = context_client.SetLink(Link(**link)) - assert response.link_uuid.uuid == link_uuid - - -def test_links_created(context_client : ContextClient): # pylint: disable=redefined-outer-name - # ----- List entities - Ensure links are created 
------------------------------------------------------------------- +def test_scenario_ready(context_client : ContextClient): # pylint: disable=redefined-outer-name + # ----- List entities - Ensure scenario is ready ------------------------------------------------------------------- response = context_client.ListContexts(Empty()) assert len(response.contexts) == len(CONTEXTS) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py index d8a943db3..c24d70185 100644 --- a/src/tests/ecoc22/tests/test_functional_delete_service.py +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -33,7 +33,7 @@ LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) DEVTYPE_EMU_PR = DeviceTypeEnum.EMULATED_PACKET_ROUTER.value -DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPTICAL_LINE_SYSTEM.value +DEVTYPE_EMU_OLS = DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value @pytest.fixture(scope='session') -- GitLab From f30968528e2bc7b84b833e028cee1da044631635 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Sun, 11 Sep 2022 14:56:31 +0000 Subject: [PATCH 25/91] Slice component: - Added methods to copy endpoints, constraints and config rules from request to slice stored in Context component. --- src/slice/service/SliceServiceServicerImpl.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index 8c70b5e5a..aaa076b7b 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -99,6 +99,10 @@ class SliceServiceServicerImpl(SliceServiceServicer): if service_reply != service_request.service_id: # pylint: disable=no-member raise Exception('Service update failed. 
Wrong Service Id was returned') + copy_endpoint_ids(request.slice_endpoint_ids, slice_request.slice_endpoint_ids) + copy_constraints(request.slice_constraints, slice_request.slice_constraints) + copy_config_rules(request.slice_config.config_rules, slice_request.slice_config.config_rules) + update_service_ids(slice_request.slice_service_ids, context_uuid, service_uuid) context_client.SetSlice(slice_request) slice_id = slice_request.slice_id -- GitLab From 06898aae4eca1a7f8386f5720d32634ad98de4b6 Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Sun, 11 Sep 2022 15:10:29 +0000 Subject: [PATCH 26/91] Service component: - Renamed DependencyResolver to TaskScheduler - Enhanced TaskScheduler with an extensible Task API framework and extensible dependency resolver - Created TaskExecutor providing an extensible task execution environment - Removed in-memory database; now is stateless and interrogates/updates Context when needed - Extended gRPC servicer to use new TaskScheduler - Removed unneeded files and re-organized helper methods - Extended ServiceHandlerAPI; now SetEndpoint/DeleteEndpoint enables to configure/deconfigure a connection - Moved ServiceHandlerFactory-related methods to the appropriate file - Created ConnectionExpander to resolve from ERO-like paths to lists of links - Created basic tasks: ServiceSetState, ServiceDelete, ConnectionConfigure, ConnectionDeconfigure - Added skeleton for L2NMEmulatedHandler (to be adapted, now is a copy of L3NM one) --- scripts/run_tests_locally-service.sh | 2 +- src/service/service/DependencyResolver.py | 73 ---- src/service/service/ServiceService.py | 6 +- .../service/ServiceServiceServicerImpl.py | 145 +++---- src/service/service/Tools.py | 358 ------------------ .../service_handler_api/FilterFields.py | 23 +- .../ServiceHandlerFactory.py | 41 +- .../service_handler_api/_ServiceHandler.py | 12 +- .../service/service_handlers/__init__.py | 19 +- .../l2nm_emulated/ConfigRules.py | 268 +++++++++++++ 
.../L2NMEmulatedServiceHandler.py | 296 ++------------- .../task_scheduler/ConnectionExpander.py | 66 ++++ .../service/task_scheduler/TaskExecutor.py | 142 +++++++ .../service/task_scheduler/TaskScheduler.py | 179 +++++++++ .../service/task_scheduler/__init__.py | 51 +++ .../tasks/Task_ConnectionConfigure.py | 63 +++ .../tasks/Task_ConnectionDeconfigure.py | 63 +++ .../tasks/Task_ServiceDelete.py | 39 ++ .../tasks/Task_ServiceSetStatus.py | 46 +++ .../service/task_scheduler/tasks/_Task.py | 30 ++ .../service/task_scheduler/tasks/__init__.py | 14 + src/service/service/tools/ContextGetters.py | 42 ++ .../service/tools/EndpointIdFormatters.py | 27 ++ src/service/service/tools/ObjectKeys.py | 26 ++ src/service/service/tools/__init__.py | 14 + ...lver.py => test_unitary_task_scheduler.py} | 60 ++- 26 files changed, 1276 insertions(+), 829 deletions(-) delete mode 100644 src/service/service/DependencyResolver.py delete mode 100644 src/service/service/Tools.py create mode 100644 src/service/service/service_handlers/l2nm_emulated/ConfigRules.py create mode 100644 src/service/service/task_scheduler/ConnectionExpander.py create mode 100644 src/service/service/task_scheduler/TaskExecutor.py create mode 100644 src/service/service/task_scheduler/TaskScheduler.py create mode 100644 src/service/service/task_scheduler/__init__.py create mode 100644 src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py create mode 100644 src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py create mode 100644 src/service/service/task_scheduler/tasks/Task_ServiceDelete.py create mode 100644 src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py create mode 100644 src/service/service/task_scheduler/tasks/_Task.py create mode 100644 src/service/service/task_scheduler/tasks/__init__.py create mode 100644 src/service/service/tools/ContextGetters.py create mode 100644 src/service/service/tools/EndpointIdFormatters.py create mode 100644 
src/service/service/tools/ObjectKeys.py create mode 100644 src/service/service/tools/__init__.py rename src/service/tests/{test_unitary_dependency_resolver.py => test_unitary_task_scheduler.py} (65%) diff --git a/scripts/run_tests_locally-service.sh b/scripts/run_tests_locally-service.sh index e2ccc3ebe..8816b9faa 100755 --- a/scripts/run_tests_locally-service.sh +++ b/scripts/run_tests_locally-service.sh @@ -21,5 +21,5 @@ RCFILE=$PROJECTDIR/coverage/.coveragerc # Run unitary tests and analyze coverage of code at same time coverage run --rcfile=$RCFILE --append -m pytest --log-level=INFO --verbose \ - service/tests/test_unitary_dependency_resolver.py \ + service/tests/test_unitary_task_scheduler.py \ service/tests/test_unitary.py diff --git a/src/service/service/DependencyResolver.py b/src/service/service/DependencyResolver.py deleted file mode 100644 index 0bf5923c8..000000000 --- a/src/service/service/DependencyResolver.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import graphlib -from enum import Enum -from typing import Dict, List, Optional, Tuple, Union -from common.proto.context_pb2 import Connection, ConnectionId, Service, ServiceId -from common.proto.pathcomp_pb2 import PathCompReply - -# Compose Directed Acyclic Graph of dependencies between connections and services -# retrieved by PathComp to create them in the appropriate order. 
- -class ObjectType(Enum): - CONNECTION = 'connection' - SERVICE = 'service' - -ObjectKey = Tuple[ObjectType, str] -ObjectId = Union[ConnectionId, ServiceId] -ObjectData = Union[Connection, Service] -ObjectItem = Tuple[ObjectId, Optional[ObjectData]] -ObjectDict = Dict[ObjectKey, ObjectItem] -Resolution = List[Tuple[ObjectKey, ObjectItem]] - -def get_connection_key(connection_id : ConnectionId) -> ObjectKey: - connection_uuid = connection_id.connection_uuid.uuid - return ObjectType.CONNECTION.value, connection_uuid - -def get_service_key(service_id : ServiceId) -> ObjectKey: - context_uuid = service_id.context_id.context_uuid.uuid - service_uuid = service_id.service_uuid.uuid - return ObjectType.SERVICE.value, '/'.join([context_uuid, service_uuid]) - -def resolve_dependencies(pathcomp_reply : PathCompReply) -> Resolution: - dag = graphlib.TopologicalSorter() - objects : ObjectDict = dict() - - for service in pathcomp_reply.services: - service_key = get_service_key(service.service_id) - objects[service_key] = (service.service_id, service) - - for connection in pathcomp_reply.connections: - connection_key = get_connection_key(connection.connection_id) - objects[connection_key] = (connection.connection_id, connection) - - # the connection's service depends on the connection - service_key = get_service_key(connection.service_id) - dag.add(service_key, connection_key) - if service_key not in objects: objects[service_key] = (connection.service_id, None) - - # the connection depends on these sub-services - for sub_service_id in connection.sub_service_ids: - sub_service_key = get_service_key(sub_service_id) - dag.add(connection_key, sub_service_key) - if sub_service_key not in objects: objects[sub_service_key] = (sub_service_id, None) - - resolution : Resolution = list() - for item_key in dag.static_order(): - item_tuple = objects.get(item_key) - resolution.append((item_key, item_tuple)) - - return resolution diff --git a/src/service/service/ServiceService.py 
b/src/service/service/ServiceService.py index b15237625..2f44fe018 100644 --- a/src/service/service/ServiceService.py +++ b/src/service/service/ServiceService.py @@ -14,9 +14,6 @@ from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc -from common.orm.backend.BackendEnum import BackendEnum -from common.orm.Database import Database -from common.orm.Factory import get_database_backend from common.proto.service_pb2_grpc import add_ServiceServiceServicer_to_server from common.tools.service.GenericGrpcService import GenericGrpcService from .ServiceServiceServicerImpl import ServiceServiceServicerImpl @@ -26,8 +23,7 @@ class ServiceService(GenericGrpcService): def __init__(self, service_handler_factory : ServiceHandlerFactory, cls_name: str = __name__) -> None: port = get_service_port_grpc(ServiceNameEnum.SERVICE) super().__init__(port, cls_name=cls_name) - database = Database(get_database_backend(backend=BackendEnum.INMEMORY)) - self.service_servicer = ServiceServiceServicerImpl(database, service_handler_factory) + self.service_servicer = ServiceServiceServicerImpl(service_handler_factory) def install_servicers(self): add_ServiceServiceServicer_to_server(self.service_servicer, self.server) diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 2591f5bda..bc71168f6 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -12,30 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, List -import graphlib, grpc, json, logging -from common.orm.Database import Database -from common.orm.HighLevel import get_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import ConnectionId, Empty, Service, ServiceId +import grpc, json, logging +from typing import Optional +from common.proto.context_pb2 import Empty, Service, ServiceId, ServiceStatusEnum from common.proto.pathcomp_pb2 import PathCompRequest from common.proto.service_pb2_grpc import ServiceServiceServicer from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method -from common.rpc_method_wrapper.ServiceExceptions import AlreadyExistsException, InvalidArgumentException, NotFoundException, NotImplementedException +from common.rpc_method_wrapper.ServiceExceptions import AlreadyExistsException, InvalidArgumentException from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string -from common.tools.object_factory.Connection import json_connection_id -from common.tools.object_factory.Service import json_service_id from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient from pathcomp.frontend.client.PathCompClient import PathCompClient -from service.service.DependencyResolver import ObjectType, resolve_dependencies -from service.service.database.DeviceModel import DeviceModel -from .database.DatabaseServiceTools import ( - sync_service_from_context, sync_service_to_context, update_service_in_local_database) -from .database.ServiceModel import ServiceModel -from .path_computation_element.PathComputationElement import PathComputationElement, dump_connectivity +from service.service.tools.ContextGetters import get_service from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory -from .Tools import delete_service, get_connection, get_service, sync_devices_from_context, update_service +from .task_scheduler.TaskScheduler 
import TasksScheduler LOGGER = logging.getLogger(__name__) @@ -44,9 +33,8 @@ METHOD_NAMES = ['CreateService', 'UpdateService', 'DeleteService'] METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES) class ServiceServiceServicerImpl(ServiceServiceServicer): - def __init__(self, database : Database, service_handler_factory : ServiceHandlerFactory) -> None: + def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None: LOGGER.debug('Creating Servicer...') - self.database = database self.service_handler_factory = service_handler_factory LOGGER.debug('Servicer Created') @@ -96,7 +84,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): raise AlreadyExistsException( 'Service', service_uuid, extra_details='context_uuid={:s}'.format(str(context_uuid))) - # just create the service in the database to lock the service_id + # just create the service in the Context database to lock the service_id # update will perform changes on the resources service_id = context_client.SetService(request) return service_id @@ -105,82 +93,63 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId: LOGGER.info('[UpdateService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) - service_id = request.service_id - service_uuid = service_id.service_uuid.uuid - service_context_uuid = service_id.context_id.context_uuid.uuid - - pathcomp_request = PathCompRequest() - pathcomp_request.services.append(request) - pathcomp_request.services.k_shortest_path.k_inspection = 5 - pathcomp_request.services.k_shortest_path.k_return = 5 - - pathcomp = PathCompClient() - pathcomp_response = pathcomp.Compute(pathcomp_request) - - # convert from a unordered lists of services and connections to a list of ordered items - # that fulfill interdependencies among them. E.g., a service cannot be created if connections - # supporting that service still does not exist. 
- resolution = resolve_dependencies(pathcomp_response) - - # implement changes + # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is + # being modified. context_client = ContextClient() - device_client = DeviceClient() - for (obj_type, obj_key), (grpc_objid, grpc_obj) in resolution: - if grpc_obj is None: - # check if the resource already exists - if obj_type == ObjectType.CONNECTION.value: - connection = get_connection(context_client, grpc_objid) - if connection is None: raise NotFoundException('Connection', obj_key) - elif obj_type == ObjectType.SERVICE.value: - service = get_service(context_client, grpc_objid) - if service is None: raise NotFoundException('Service', obj_key) - else: - MSG_EXTRA_DETAILS = 'obj_type={:s} obj_key={:s} grpc_objid={:s} grpc_obj={:s}' - str_grpc_obj = 'None' if grpc_obj is None else grpc_message_to_json_string(grpc_obj) - str_grpc_objid = 'None' if grpc_objid is None else grpc_message_to_json_string(grpc_objid) - msg_extra_details = MSG_EXTRA_DETAILS.format(obj_type, obj_key, str_grpc_objid, str_grpc_obj) - raise NotImplementedException('Empty Dependency', extra_details=msg_extra_details) + _service : Optional[Service] = get_service(context_client, request.service_id) + service = Service() + service.CopyFrom(request if _service is None else _service) + service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED + context_client.SetService(service) + + num_disjoint_paths = None + for constraint in request.service_constraints: + if constraint.WhichOneof('constraint') == 'sla_availability': + num_disjoint_paths = constraint.sla_availability.num_disjoint_paths + break + + tasks_scheduler = TasksScheduler(self.service_handler_factory) + if len(request.service_endpoint_ids) >= (2 if num_disjoint_paths is None else 4): + pathcomp_request = PathCompRequest() + pathcomp_request.services.append(request) + + if num_disjoint_paths is None: + 
pathcomp_request.shortest_path.Clear() else: - # create/update the resource - if obj_type == ObjectType.CONNECTION.value: - update_connection(context_client, device_client, self.service_handler_factory, grpc_obj) - context_client.SetConnection(grpc_obj) - elif obj_type == ObjectType.SERVICE.value: - update_service(context_client, device_client, self.service_handler_factory, grpc_obj) - context_client.SetService(grpc_obj) - else: - MSG_EXTRA_DETAILS = 'obj_type={:s} obj_key={:s} grpc_objid={:s} grpc_obj={:s}' - str_grpc_obj = 'None' if grpc_obj is None else grpc_message_to_json_string(grpc_obj) - str_grpc_objid = 'None' if grpc_objid is None else grpc_message_to_json_string(grpc_objid) - msg_extra_details = MSG_EXTRA_DETAILS.format(obj_type, obj_key, str_grpc_objid, str_grpc_obj) - raise NotImplementedException('Specified Dependency', extra_details=msg_extra_details) + pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths + pathcomp = PathCompClient() + LOGGER.info('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) + pathcomp_reply = pathcomp.Compute(pathcomp_request) + LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) + + # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among + # the services and connections retrieved and produces a schedule of tasks (an ordered list of tasks to be + # executed) to implement the requested create/update operation. 
+ tasks_scheduler.compose_from_pathcompreply(pathcomp_reply, is_delete=False) + + tasks_scheduler.execute_all() return request.service_id @safe_and_metered_rpc_method(METRICS, LOGGER) def DeleteService(self, request : ServiceId, context : grpc.ServicerContext) -> Empty: LOGGER.info('[DeleteService] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) - pce = PathComputationElement() - pce.load_topology(self.context_client) - pce.load_connectivity(self.context_client, request) - #pce.dump_topology_to_file('../data/topo.dot') - #pce.dump_connectivity_to_file('../data/conn-before.txt') - connectivity = pce.get_connectivity_from_service_id(request) - if connectivity is None: return Empty() - #pce.dump_connectivity_to_file('../data/conn-after.txt') - - LOGGER.info('[DeleteService] connectivity = {:s}'.format(str(dump_connectivity(connectivity)))) - - for connection in connectivity.get('connections'): - delete_service( - self.database, self.context_client, self.device_client, self.service_handler_factory, - request, connection) - - for sub_service, sub_connections in connectivity.get('requirements', []): - for sub_connection in sub_connections: - delete_service( - self.database, self.context_client, self.device_client, self.service_handler_factory, - sub_service.service_id, sub_connection) + context_client = ContextClient() + # Set service status to "SERVICESTATUS_PENDING_REMOVAL" to ensure rest of components are aware the service is + # being modified. + _service : Optional[Service] = get_service(context_client, request) + if _service is None: raise Exception('Service({:s}) not found'.format(grpc_message_to_json_string(request))) + service = Service() + service.CopyFrom(_service) + service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL + context_client.SetService(service) + + # Feed TaskScheduler with this service and the sub-services and sub-connections related to this service. 
+ # TaskScheduler identifies inter-dependencies among them and produces a schedule of tasks (an ordered list of + # tasks to be executed) to implement the requested delete operation. + tasks_scheduler = TasksScheduler(self.service_handler_factory) + tasks_scheduler.compose_from_service(service, is_delete=True) + tasks_scheduler.execute_all() return Empty() diff --git a/src/service/service/Tools.py b/src/service/service/Tools.py deleted file mode 100644 index ea4369fd5..000000000 --- a/src/service/service/Tools.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import grpc, logging -from typing import Any, Dict, List, Optional, Tuple -from common.orm.Database import Database -from common.orm.HighLevel import get_object, get_related_objects -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import ( - ConfigRule, Connection, ConnectionId, Constraint, EndPointId, Service, ServiceId, ServiceStatusEnum) -from common.rpc_method_wrapper.ServiceExceptions import ( - InvalidArgumentException, NotFoundException, OperationFailedException) -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from .database.ConfigModel import ( - ConfigModel, ConfigRuleModel, ORM_ConfigActionEnum, get_config_rules, grpc_config_rules_to_raw) -from .database.ConstraintModel import ConstraintModel, ConstraintsModel, get_constraints, grpc_constraints_to_raw -from .database.DatabaseDeviceTools import sync_device_from_context -from .database.DatabaseServiceTools import ( - delete_service_from_context, sync_service_from_context, sync_service_to_context, update_service_in_local_database) -from .database.DeviceModel import DeviceModel, DriverModel -from .database.EndPointModel import EndPointModel, grpc_endpointids_to_raw -from .database.RelationModels import ServiceEndPointModel -from .database.ServiceModel import ServiceModel -from .service_handler_api._ServiceHandler import _ServiceHandler -from .service_handler_api.FilterFields import FilterFieldEnum -from .service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory -from .service_handler_api.Tools import ( - check_errors_deleteconfig, check_errors_deleteconstraint, check_errors_deleteendpoint, check_errors_setconfig, - check_errors_setconstraint, check_errors_setendpoint) - -LOGGER = logging.getLogger(__name__) - -def get_connection(context_client : ContextClient, connection_id : ConnectionId) -> Optional[Connection]: - try: - connection : Connection = context_client.GetConnection(connection_id) - return 
connection - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member - return None - -def get_service(context_client : ContextClient, service_id : ServiceId) -> Optional[Service]: - try: - service : Service = context_client.GetService(service_id) - return service - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member - return None - -def sync_devices_from_context( - context_client : ContextClient, database : Database, db_service : Optional[ServiceModel], - service_endpoint_ids : List[EndPointId] - ) -> Dict[str, DeviceModel]: - - required_device_uuids = set() - if db_service is not None: - db_endpoints = get_related_objects(db_service, ServiceEndPointModel, 'endpoint_fk') - for db_endpoint in db_endpoints: - db_device = get_object(database, DeviceModel, db_endpoint.device_fk, raise_if_not_found=False) - required_device_uuids.add(db_device.device_uuid) - - for endpoint_id in service_endpoint_ids: - required_device_uuids.add(endpoint_id.device_id.device_uuid.uuid) - - db_devices = {} - devices_not_found = set() - for device_uuid in required_device_uuids: - sync_device_from_context(device_uuid, context_client, database) - db_device = get_object(database, DeviceModel, device_uuid, raise_if_not_found=False) - if db_device is None: - devices_not_found.add(device_uuid) - else: - db_devices[device_uuid] = db_device - - if len(devices_not_found) > 0: - extra_details = ['Devices({:s}) cannot be retrieved from Context'.format(str(devices_not_found))] - raise NotFoundException('Device', '...', extra_details=extra_details) - - return db_devices - -def classify_config_rules( - db_service : ServiceModel, service_config_rules : List[ConfigRule], - resources_to_set: List[Tuple[str, Any]], resources_to_delete : List[Tuple[str, Any]]): - - context_config_rules = get_config_rules(db_service.database, db_service.pk, 'running') - context_config_rules = {config_rule[1]: config_rule[2] for 
config_rule in context_config_rules} - #LOGGER.info('[classify_config_rules] context_config_rules = {:s}'.format(str(context_config_rules))) - - request_config_rules = grpc_config_rules_to_raw(service_config_rules) - #LOGGER.info('[classify_config_rules] request_config_rules = {:s}'.format(str(request_config_rules))) - - for config_rule in request_config_rules: - action, key, value = config_rule - if action == ORM_ConfigActionEnum.SET: - if (key not in context_config_rules) or (context_config_rules[key] != value): - resources_to_set.append((key, value)) - elif action == ORM_ConfigActionEnum.DELETE: - if key in context_config_rules: - resources_to_delete.append((key, value)) - else: - raise InvalidArgumentException('config_rule.action', str(action), extra_details=str(request_config_rules)) - - #LOGGER.info('[classify_config_rules] resources_to_set = {:s}'.format(str(resources_to_set))) - #LOGGER.info('[classify_config_rules] resources_to_delete = {:s}'.format(str(resources_to_delete))) - -def classify_constraints( - db_service : ServiceModel, service_constraints : List[Constraint], - constraints_to_set: List[Tuple[str, str]], constraints_to_delete : List[Tuple[str, str]]): - - context_constraints = get_constraints(db_service.database, db_service.pk, 'running') - context_constraints = {constraint[0]: constraint[1] for constraint in context_constraints} - #LOGGER.info('[classify_constraints] context_constraints = {:s}'.format(str(context_constraints))) - - request_constraints = grpc_constraints_to_raw(service_constraints) - #LOGGER.info('[classify_constraints] request_constraints = {:s}'.format(str(request_constraints))) - - for constraint in request_constraints: - constraint_type, constraint_value = constraint - if constraint_type in context_constraints: - if context_constraints[constraint_type] != constraint_value: - constraints_to_set.append(constraint) - else: - constraints_to_set.append(constraint) - context_constraints.pop(constraint_type, None) - - for 
constraint in context_constraints: - constraints_to_delete.append(constraint) - - #LOGGER.info('[classify_constraints] constraints_to_set = {:s}'.format(str(constraints_to_set))) - #LOGGER.info('[classify_constraints] constraints_to_delete = {:s}'.format(str(constraints_to_delete))) - -def get_service_endpointids(db_service : ServiceModel) -> List[Tuple[str, str, Optional[str]]]: - db_endpoints : List[EndPointModel] = get_related_objects(db_service, ServiceEndPointModel, 'endpoint_fk') - endpoint_ids = [db_endpoint.dump_id() for db_endpoint in db_endpoints] - return [ - (endpoint_id['device_id']['device_uuid']['uuid'], endpoint_id['endpoint_uuid']['uuid'], - endpoint_id.get('topology_id', {}).get('topology_uuid', {}).get('uuid', None)) - for endpoint_id in endpoint_ids - ] - -def classify_endpointids( - db_service : ServiceModel, service_endpoint_ids : List[EndPointId], - endpointids_to_set: List[Tuple[str, str, Optional[str]]], - endpointids_to_delete : List[Tuple[str, str, Optional[str]]]): - - context_endpoint_ids = get_service_endpointids(db_service) - #LOGGER.info('[classify_endpointids] context_endpoint_ids = {:s}'.format(str(context_endpoint_ids))) - context_endpoint_ids = set(context_endpoint_ids) - #LOGGER.info('[classify_endpointids] context_endpoint_ids = {:s}'.format(str(context_endpoint_ids))) - - request_endpoint_ids = grpc_endpointids_to_raw(service_endpoint_ids) - #LOGGER.info('[classify_endpointids] request_endpoint_ids = {:s}'.format(str(request_endpoint_ids))) - - if len(service_endpoint_ids) != 2: return - for endpoint_id in request_endpoint_ids: - #if endpoint_id not in context_endpoint_ids: - # endpointids_to_set.append(endpoint_id) - #context_endpoint_ids.discard(endpoint_id) - endpointids_to_set.append(endpoint_id) - - #for endpoint_id in context_endpoint_ids: - # endpointids_to_delete.append(endpoint_id) - - #LOGGER.info('[classify_endpointids] endpointids_to_set = {:s}'.format(str(endpointids_to_set))) - 
#LOGGER.info('[classify_endpointids] endpointids_to_delete = {:s}'.format(str(endpointids_to_delete))) - -def get_service_handler_class( - service_handler_factory : ServiceHandlerFactory, db_service : ServiceModel, db_devices : Dict[str, DeviceModel] - ) -> Optional[_ServiceHandler]: - - str_service_key = db_service.pk - database = db_service.database - - # Assume all devices involved in the service must support at least one driver in common - device_drivers = None - for _,db_device in db_devices.items(): - db_driver_pks = db_device.references(DriverModel) - db_driver_names = [DriverModel(database, pk).driver.value for pk,_ in db_driver_pks] - if device_drivers is None: - device_drivers = set(db_driver_names) - else: - device_drivers.intersection_update(db_driver_names) - - filter_fields = { - FilterFieldEnum.SERVICE_TYPE.value : db_service.service_type.value, # must be supported - FilterFieldEnum.DEVICE_DRIVER.value : device_drivers, # at least one must be supported - } - - msg = 'Selecting service handler for service({:s}) with filter_fields({:s})...' - LOGGER.info(msg.format(str(str_service_key), str(filter_fields))) - service_handler_class = service_handler_factory.get_service_handler_class(**filter_fields) - msg = 'ServiceHandler({:s}) selected for service({:s}) with filter_fields({:s})...' 
- LOGGER.info(msg.format(str(service_handler_class.__name__), str(str_service_key), str(filter_fields))) - return service_handler_class - -def update_service( - database : Database, context_client : ContextClient, device_client : DeviceClient, - service_handler_factory : ServiceHandlerFactory, service : Service, connection : Connection - ) -> ServiceModel: - - service_id = service.service_id - service_uuid = service_id.service_uuid.uuid - service_context_uuid = service_id.context_id.context_uuid.uuid - str_service_key = key_to_str([service_context_uuid, service_uuid]) - - # Sync before updating service to ensure we have devices, endpoints, constraints, and config rules to be - # set/deleted before actuallymodifying them in the local in-memory database. - - sync_service_from_context(service_context_uuid, service_uuid, context_client, database) - db_service = get_object(database, ServiceModel, str_service_key, raise_if_not_found=False) - db_devices = sync_devices_from_context(context_client, database, db_service, service.service_endpoint_ids) - - if db_service is None: db_service,_ = update_service_in_local_database(database, service) - LOGGER.info('[update_service] db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - - resources_to_set : List[Tuple[str, Any]] = [] # resource_key, resource_value - resources_to_delete : List[Tuple[str, Any]] = [] # resource_key, resource_value - classify_config_rules(db_service, service.service_config.config_rules, resources_to_set, resources_to_delete) - - constraints_to_set : List[Tuple[str, str]] = [] # constraint_type, constraint_value - constraints_to_delete : List[Tuple[str, str]] = [] # constraint_type, constraint_value - classify_constraints(db_service, service.service_constraints, constraints_to_set, constraints_to_delete) - - endpointids_to_set : List[Tuple[str, str, Optional[str]]] = [] # device_uuid, endpoint_uuid, topology_uuid - 
endpointids_to_delete : List[Tuple[str, str, Optional[str]]] = [] # device_uuid, endpoint_uuid, topology_uuid - classify_endpointids(db_service, service.service_endpoint_ids, endpointids_to_set, endpointids_to_delete) - - service_handler_class = get_service_handler_class(service_handler_factory, db_service, db_devices) - service_handler_settings = {} - service_handler : _ServiceHandler = service_handler_class( - db_service, database, context_client, device_client, **service_handler_settings) - - errors = [] - - if len(errors) == 0: - results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete) - errors.extend(check_errors_deleteendpoint(endpointids_to_delete, results_deleteendpoint)) - - if len(errors) == 0: - results_deleteconstraint = service_handler.DeleteConstraint(constraints_to_delete) - errors.extend(check_errors_deleteconstraint(constraints_to_delete, results_deleteconstraint)) - - if len(errors) == 0: - results_deleteconfig = service_handler.DeleteConfig(resources_to_delete) - errors.extend(check_errors_deleteconfig(resources_to_delete, results_deleteconfig)) - - if len(errors) == 0: - results_setconfig = service_handler.SetConfig(resources_to_set) - errors.extend(check_errors_setconfig(resources_to_set, results_setconfig)) - - if len(errors) == 0: - results_setconstraint = service_handler.SetConstraint(constraints_to_set) - errors.extend(check_errors_setconstraint(constraints_to_set, results_setconstraint)) - - if len(errors) == 0: - results_setendpoint = service_handler.SetEndpoint(endpointids_to_set) - errors.extend(check_errors_setendpoint(endpointids_to_set, results_setendpoint)) - - if len(errors) > 0: - raise OperationFailedException('UpdateService', extra_details=errors) - - LOGGER.info('[update_service] len(service.service_endpoint_ids) = {:d}'.format(len(service.service_endpoint_ids))) - if len(service.service_endpoint_ids) >= 2: - service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE - - db_service,_ = 
update_service_in_local_database(database, service) - LOGGER.info('[update_service] db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - - sync_service_to_context(db_service, context_client) - context_client.SetConnection(connection) - return db_service - -def delete_service( - database : Database, context_client : ContextClient, device_client : DeviceClient, - service_handler_factory : ServiceHandlerFactory, service_id : ServiceId, connection : Connection - ) -> None: - - context_client.RemoveConnection(connection.connection_id) - - service_uuid = service_id.service_uuid.uuid - service_context_uuid = service_id.context_id.context_uuid.uuid - str_service_key = key_to_str([service_context_uuid, service_uuid]) - - # Sync before updating service to ensure we have devices, endpoints, constraints, and config rules to be - # set/deleted before actuallymodifying them in the local in-memory database. - - sync_service_from_context(service_context_uuid, service_uuid, context_client, database) - db_service : ServiceModel = get_object(database, ServiceModel, str_service_key, raise_if_not_found=False) - if db_service is None: return - LOGGER.info('[delete_service] db_service = {:s}'.format(str(db_service.dump( - include_endpoint_ids=True, include_constraints=True, include_config_rules=True)))) - - db_devices = sync_devices_from_context(context_client, database, db_service, []) - - resources_to_delete : List[Tuple[str, str]] = [ # resource_key, resource_value - (config_rule[1], config_rule[2]) - for config_rule in get_config_rules(db_service.database, db_service.pk, 'running') - ] - - constraints_to_delete : List[Tuple[str, str]] = [ # constraint_type, constraint_value - (constraint[0], constraint[1]) - for constraint in get_constraints(db_service.database, db_service.pk, 'running') - ] - - # device_uuid, endpoint_uuid, topology_uuid - endpointids_to_delete : List[Tuple[str, str, Optional[str]]] = 
list(set(get_service_endpointids(db_service))) - - service_handler_class = get_service_handler_class(service_handler_factory, db_service, db_devices) - service_handler_settings = {} - service_handler : _ServiceHandler = service_handler_class( - db_service, database, context_client, device_client, **service_handler_settings) - - errors = [] - - if len(errors) == 0: - results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete) - errors.extend(check_errors_deleteendpoint(endpointids_to_delete, results_deleteendpoint)) - - if len(errors) == 0: - results_deleteconstraint = service_handler.DeleteConstraint(constraints_to_delete) - errors.extend(check_errors_deleteconstraint(constraints_to_delete, results_deleteconstraint)) - - if len(errors) == 0: - results_deleteconfig = service_handler.DeleteConfig(resources_to_delete) - errors.extend(check_errors_deleteconfig(resources_to_delete, results_deleteconfig)) - - if len(errors) > 0: - raise OperationFailedException('DeleteService', extra_details=errors) - - delete_service_from_context(db_service, context_client) - - for db_service_endpoint_pk,_ in db_service.references(ServiceEndPointModel): - ServiceEndPointModel(database, db_service_endpoint_pk).delete() - - db_running_config = ConfigModel(database, db_service.service_config_fk) - for db_config_rule_pk,_ in db_running_config.references(ConfigRuleModel): - ConfigRuleModel(database, db_config_rule_pk).delete() - - db_running_constraints = ConstraintsModel(database, db_service.service_constraints_fk) - for db_constraint_pk,_ in db_running_constraints.references(ConstraintModel): - ConstraintModel(database, db_constraint_pk).delete() - - db_service.delete() - db_running_config.delete() - db_running_constraints.delete() diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 9d8f9ad28..98113ba30 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ 
b/src/service/service/service_handler_api/FilterFields.py @@ -13,15 +13,30 @@ # limitations under the License. from enum import Enum -from service.service.database.ServiceModel import ORM_ServiceTypeEnum -from service.service.database.DeviceModel import ORM_DeviceDriverEnum +from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum class FilterFieldEnum(Enum): SERVICE_TYPE = 'service_type' DEVICE_DRIVER = 'device_driver' +SERVICE_TYPE_VALUES = { + ServiceTypeEnum.SERVICETYPE_UNKNOWN, + ServiceTypeEnum.SERVICETYPE_L3NM, + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, +} + +DEVICE_DRIVER_VALUES = { + DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, + DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, + DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, + DeviceDriverEnum.DEVICEDRIVER_P4, + DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, + DeviceDriverEnum.DEVICEDRIVER_ONF_TR_352, +} + # Map allowed filter fields to allowed values per Filter field. If no restriction (free text) None is specified FILTER_FIELD_ALLOWED_VALUES = { - FilterFieldEnum.SERVICE_TYPE.value : {i.value for i in ORM_ServiceTypeEnum}, - FilterFieldEnum.DEVICE_DRIVER.value : {i.value for i in ORM_DeviceDriverEnum}, + FilterFieldEnum.SERVICE_TYPE.value : SERVICE_TYPE_VALUES, + FilterFieldEnum.DEVICE_DRIVER.value : DEVICE_DRIVER_VALUES, } diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py index 8b7223a95..09a56775d 100644 --- a/src/service/service/service_handler_api/ServiceHandlerFactory.py +++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py @@ -14,7 +14,9 @@ import logging, operator from enum import Enum -from typing import Any, Dict, Iterable, List, Set, Tuple +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple +from common.proto.context_pb2 import Device, Service +from common.tools.grpc.Tools import grpc_message_to_json_string from 
service.service.service_handler_api._ServiceHandler import _ServiceHandler from .Exceptions import ( UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, UnsupportedFilterFieldException, @@ -91,3 +93,40 @@ class ServiceHandlerFactory: candidate_service_handler_classes = sorted( candidate_service_handler_classes.items(), key=operator.itemgetter(1), reverse=True) return candidate_service_handler_classes[0][0] + +def get_device_supported_drivers(device : Device) -> Set[int]: + return {device_driver for device_driver in device.device_drivers} + +def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]: + common_device_drivers = None + for device_drivers in drivers_per_device: + if common_device_drivers is None: + common_device_drivers = set(device_drivers) + else: + common_device_drivers.intersection_update(device_drivers) + if common_device_drivers is None: common_device_drivers = set() + return common_device_drivers + +def get_service_handler_class( + service_handler_factory : ServiceHandlerFactory, service : Service, connection_devices : Dict[str, Device] +) -> Optional[_ServiceHandler]: + + str_service_key = grpc_message_to_json_string(service.service_id) + + # Assume all devices involved in the service's connection must support at least one driver in common + common_device_drivers = get_common_device_drivers([ + get_device_supported_drivers(device) + for device in connection_devices.values() + ]) + + filter_fields = { + FilterFieldEnum.SERVICE_TYPE.value : service.service_type, # must be supported + FilterFieldEnum.DEVICE_DRIVER.value : common_device_drivers, # at least one must be supported + } + + MSG = 'Selecting service handler for service({:s}) with filter_fields({:s})...' + LOGGER.info(MSG.format(str(str_service_key), str(filter_fields))) + service_handler_class = service_handler_factory.get_service_handler_class(**filter_fields) + MSG = 'ServiceHandler({:s}) selected for service({:s}) with filter_fields({:s})...' 
+ LOGGER.info(MSG.format(str(service_handler_class.__name__), str(str_service_key), str(filter_fields))) + return service_handler_class diff --git a/src/service/service/service_handler_api/_ServiceHandler.py b/src/service/service/service_handler_api/_ServiceHandler.py index e724ebcc9..170e842cd 100644 --- a/src/service/service/service_handler_api/_ServiceHandler.py +++ b/src/service/service/service_handler_api/_ServiceHandler.py @@ -38,12 +38,16 @@ class _ServiceHandler: """ raise NotImplementedError() - def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: """ Set endpoints from a list. Parameters: endpoints : List[Tuple[str, str, Optional[str]]] List of tuples, each containing a device_uuid, endpoint_uuid and, optionally, the topology_uuid of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. Returns: results : List[Union[bool, Exception]] List of results for endpoint changes requested. Return values must be in the same order than @@ -52,12 +56,16 @@ class _ServiceHandler: """ raise NotImplementedError() - def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: """ Delete endpoints form a list. Parameters: endpoints : List[Tuple[str, str, Optional[str]]] List of tuples, each containing a device_uuid, endpoint_uuid, and the topology_uuid of the endpoint to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. Returns: results : List[Union[bool, Exception]] List of results for endpoint deletions requested. 
Return values must be in the same order than diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 33e345c42..89e717722 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..service_handler_api.FilterFields import FilterFieldEnum, ORM_DeviceDriverEnum, ORM_ServiceTypeEnum +from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum +from ..service_handler_api.FilterFields import FilterFieldEnum from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler from .l3nm_openconfig.L3NMOpenConfigServiceHandler import L3NMOpenConfigServiceHandler @@ -21,26 +22,26 @@ from .tapi_tapi.TapiServiceHandler import TapiServiceHandler SERVICE_HANDLERS = [ (L2NMEmulatedServiceHandler, [ { - FilterFieldEnum.SERVICE_TYPE : ORM_ServiceTypeEnum.L2NM, - FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.UNDEFINED, + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, } ]), (L3NMEmulatedServiceHandler, [ { - FilterFieldEnum.SERVICE_TYPE : ORM_ServiceTypeEnum.L3NM, - FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.UNDEFINED, + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, } ]), (L3NMOpenConfigServiceHandler, [ { - FilterFieldEnum.SERVICE_TYPE : ORM_ServiceTypeEnum.L3NM, - FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.OPENCONFIG, + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, } ]), (TapiServiceHandler, [ { - FilterFieldEnum.SERVICE_TYPE : 
ORM_ServiceTypeEnum.TAPI_CONNECTIVITY_SERVICE, - FilterFieldEnum.DEVICE_DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API, + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, } ]), ] diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py new file mode 100644 index 000000000..be0f1fda5 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py @@ -0,0 +1,268 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Dict, List +from common.proto.context_pb2 import EndPointId, Service +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from service.service.service_handler_api.AnyTreeTools import TreeNode + +def setup_config_rules( + service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, + service_settings : TreeNode, endpoint_settings : TreeNode +) -> List[Dict]: + + connection_short_uuid = connection_uuid.split('-')[-1] + network_instance_name = '{:s}-NetInst'.format(connection_short_uuid) + network_interface_desc = '{:s}-NetIf'.format(connection_uuid) + network_subinterface_desc = '{:s}-NetSubIf'.format(connection_uuid) + + if service_settings is None: + # MSG = 'Unable to retrieve settings for Service({:s})' + #raise Exception(MSG.format(connection_uuid)) + mtu = 1450 + bgp_as = 0 + bgp_route_target = '0:0' + else: + json_settings : Dict = service_settings.value + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + bgp_as = json_settings.get('bgp_as', 0 ) # 65000 + bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + + if endpoint_settings is None: + #MSG = 'Unable to retrieve settings for device({:s}/endpoint({:s}) in service({:s})' + #raise Exception(MSG.format(device_uuid, endpoint_uuid, connection_uuid)) + route_distinguisher = '0:0' + sub_interface_index = 0 + vlan_id = 1 + address_ip = '0.0.0.0' + address_prefix = 24 + if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + else: + json_endpoint_settings : Dict = endpoint_settings.value + #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + address_ip = 
json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' + address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + + json_config_rules = [ + json_config_rule_set( + '/network_instance[{:s}]'.format(network_instance_name), { + 'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF', + 'route_distinguisher': route_distinguisher, + #'router_id': router_id, 'address_families': address_families, + }), + json_config_rule_set( + '/interface[{:s}]'.format(endpoint_uuid), { + 'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu, + }), + json_config_rule_set( + '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { + 'name': endpoint_uuid, 'index': sub_interface_index, + 'description': network_subinterface_desc, 'vlan_id': vlan_id, + 'address_ip': address_ip, 'address_prefix': address_prefix, + }), + json_config_rule_set( + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { + 'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid, + 'subinterface': sub_interface_index, + }), + json_config_rule_set( + '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { + 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as, + }), + json_config_rule_set( + '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', + }), + json_config_rule_set( + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( + network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', + }), + 
json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { + 'policy_name': '{:s}_import'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', + }), + json_config_rule_set( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { + 'policy_name': 
'{:s}_export'.format(network_instance_name), + }), + json_config_rule_set( + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', + }), + json_config_rule_set( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name), + }), + ] + + return json_config_rules + +def teardown_config_rules( + service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, + service_settings : TreeNode, endpoint_settings : TreeNode +) -> List[Dict]: + + connection_short_uuid = connection_uuid.split('-')[-1] + network_instance_name = '{:s}-NetInst'.format(connection_short_uuid) + + if service_settings is None: + # MSG = 'Unable to retrieve settings for Service({:s})' + #raise Exception(MSG.format(connection_uuid)) + bgp_route_target = '0:0' + else: + json_settings : Dict = service_settings.value + bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + + if endpoint_settings is None: + #MSG = 'Unable to retrieve settings for device({:s}/endpoint({:s}) in service({:s})' + #raise Exception(MSG.format(device_uuid, endpoint_uuid, connection_uuid)) + sub_interface_index = 0 + vlan_id = 1 + if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + else: + json_endpoint_settings : Dict = endpoint_settings.value + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + + json_config_rules = [ + json_config_rule_delete( + 
'/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { + 'name': network_instance_name, 'id': if_subif_name, + }), + json_config_rule_delete( + '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { + 'name': endpoint_uuid, 'index': sub_interface_index, + }), + json_config_rule_delete( + '/interface[{:s}]'.format(endpoint_uuid), { + 'name': endpoint_uuid, + }), + json_config_rule_delete( + '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( + network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', + }), + json_config_rule_delete( + '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { + 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', + 'address_family': 'IPV4', + }), + json_config_rule_delete( + '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { + 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', + }), + json_config_rule_delete( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { + 'policy_name': '{:s}_import'.format(network_instance_name), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': 
'{:s}_rt_import'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), + }), + json_config_rule_delete( + # pylint: disable=duplicate-string-formatting-argument + '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( + network_instance_name, network_instance_name), { + 'name': network_instance_name, + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( + network_instance_name, '3'), { + 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', + }), + json_config_rule_delete( + '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { + 'policy_name': '{:s}_export'.format(network_instance_name), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( + network_instance_name, bgp_route_target), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), + }), + json_config_rule_delete( + '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { + 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), + }), + json_config_rule_delete( + '/network_instance[{:s}]'.format(network_instance_name), { + 'name': network_instance_name + }), + ] + return json_config_rules diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py index 889a60ad5..5b0bd0304 100644 --- a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py +++ 
b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py @@ -13,188 +13,65 @@ # limitations under the License. import anytree, json, logging -from typing import Any, Dict, List, Optional, Tuple, Union -from common.orm.Database import Database -from common.orm.HighLevel import get_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import Device -from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from typing import Any, List, Optional, Tuple, Union +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service +from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_length, chk_type -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from service.service.database.ConfigModel import ORM_ConfigActionEnum, get_config_rules -from service.service.database.ContextModel import ContextModel -from service.service.database.DeviceModel import DeviceModel -from service.service.database.ServiceModel import ServiceModel from service.service.service_handler_api._ServiceHandler import _ServiceHandler from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from .ConfigRules import setup_config_rules, teardown_config_rules LOGGER = logging.getLogger(__name__) class L2NMEmulatedServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called - self, db_service : ServiceModel, database : Database, context_client : ContextClient, - device_client : DeviceClient, **settings + self, service : Service, task_executor : TaskExecutor, **settings ) -> None: - self.__db_service = db_service - self.__database = database - self.__context_client = context_client # pylint: disable=unused-private-member - 
self.__device_client = device_client - - self.__db_context : ContextModel = get_object(self.__database, ContextModel, self.__db_service.context_fk) - str_service_key = key_to_str([self.__db_context.context_uuid, self.__db_service.service_uuid]) - db_config = get_config_rules(self.__database, str_service_key, 'running') + self.__service = service + self.__task_executor = task_executor # pylint: disable=unused-private-member self.__resolver = anytree.Resolver(pathattr='name') self.__config = TreeNode('.') - for action, resource_key, resource_value in db_config: - if action == ORM_ConfigActionEnum.SET: + for config_rule in service.service_config.config_rules: + action = config_rule.action + if config_rule.WhichOneof('config_rule') != 'custom': continue + resource_key = config_rule.custom.resource_key + resource_value = config_rule.custom.resource_value + if action == ConfigActionEnum.CONFIGACTION_SET: try: resource_value = json.loads(resource_value) except: # pylint: disable=bare-except pass set_subnode_value(self.__resolver, self.__config, resource_key, resource_value) - elif action == ORM_ConfigActionEnum.DELETE: + elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) - def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: chk_type('endpoints', endpoints, list) if len(endpoints) == 0: return [] - service_uuid = self.__db_service.service_uuid - service_short_uuid = service_uuid.split('-')[-1] - network_instance_name = '{:s}-NetInst'.format(service_short_uuid) - network_interface_desc = '{:s}-NetIf'.format(service_uuid) - network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) - + service_uuid = self.__service.service_id.service_uuid.uuid settings : TreeNode = get_subnode(self.__resolver, 
self.__config, '/settings', None) - if settings is None: raise Exception('Unable to retrieve service settings') - json_settings : Dict = settings.value - mtu = json_settings.get('mtu', 1450 ) # 1512 - #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - bgp_as = json_settings.get('bgp_as', 0 ) # 65000 - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 results = [] for endpoint in endpoints: try: chk_type('endpoint', endpoint, (tuple, list)) chk_length('endpoint', endpoint, min_length=2, max_length=3) - if len(endpoint) == 2: - device_uuid, endpoint_uuid = endpoint - else: - device_uuid, endpoint_uuid, _ = endpoint # ignore topology_uuid by now + device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid) endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None) - if endpoint_settings is None: - raise Exception('Unable to retrieve service settings for endpoint({:s})'.format( - str(endpoint_settings_uri))) - json_endpoint_settings : Dict = endpoint_settings.value - #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' - route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 - vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 - address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' - address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 - if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) - db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True) - json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True) - json_device_config : Dict = json_device.setdefault('device_config', 
{}) - json_device_config_rules : List = json_device_config.setdefault('config_rules', []) - json_device_config_rules.extend([ - json_config_rule_set( - '/network_instance[{:s}]'.format(network_instance_name), { - 'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF', - 'route_distinguisher': route_distinguisher, - #'router_id': router_id, 'address_families': address_families, - }), - json_config_rule_set( - '/interface[{:s}]'.format(endpoint_uuid), { - 'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu, - }), - json_config_rule_set( - '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { - 'name': endpoint_uuid, 'index': sub_interface_index, - 'description': network_subinterface_desc, 'vlan_id': vlan_id, - 'address_ip': address_ip, 'address_prefix': address_prefix, - }), - json_config_rule_set( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { - 'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid, - 'subinterface': sub_interface_index, - }), - json_config_rule_set( - '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { - 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as, - }), - json_config_rule_set( - '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', - }), - json_config_rule_set( - '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( - network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', - }), - json_config_rule_set( - 
'/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - }), - json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_set( - '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { - 'policy_name': '{:s}_import'.format(network_instance_name), - }), - json_config_rule_set( - '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', - }), - json_config_rule_set( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name), - }), - json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - }), - json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_set( - '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { - 'policy_name': '{:s}_export'.format(network_instance_name), - }), - 
json_config_rule_set( - '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', - }), - json_config_rule_set( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name), - }), - ]) - self.__device_client.ConfigureDevice(Device(**json_device)) + json_config_rules = setup_config_rules( + service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings) + + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + for json_config_rule in json_config_rules: + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to SetEndpoint({:s})'.format(str(endpoint))) @@ -202,127 +79,32 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): return results - def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: chk_type('endpoints', endpoints, list) if len(endpoints) == 0: return [] - service_uuid = self.__db_service.service_uuid - service_short_uuid = service_uuid.split('-')[-1] - network_instance_name = '{:s}-NetInst'.format(service_short_uuid) - + service_uuid = self.__service.service_uuid.uuid settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None) - if 
settings is None: raise Exception('Unable to retrieve service settings') - json_settings : Dict = settings.value - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 results = [] for endpoint in endpoints: try: chk_type('endpoint', endpoint, (tuple, list)) chk_length('endpoint', endpoint, min_length=2, max_length=3) - if len(endpoint) == 2: - device_uuid, endpoint_uuid = endpoint - else: - device_uuid, endpoint_uuid, _ = endpoint # ignore topology_uuid by now + device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now endpoint_settings_uri = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid) endpoint_settings : TreeNode = get_subnode(self.__resolver, self.__config, endpoint_settings_uri, None) - if endpoint_settings is None: - raise Exception('Unable to retrieve service settings for endpoint({:s})'.format( - str(endpoint_settings_uri))) - json_endpoint_settings : Dict = endpoint_settings.value - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 - vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 - if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) - db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True) - json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True) - json_device_config : Dict = json_device.setdefault('device_config', {}) - json_device_config_rules : List = json_device_config.setdefault('config_rules', []) - json_device_config_rules.extend([ - json_config_rule_delete( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { - 'name': network_instance_name, 'id': if_subif_name, - }), - json_config_rule_delete( - '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { - 'name': endpoint_uuid, 'index': sub_interface_index, - }), - json_config_rule_delete( - 
'/interface[{:s}]'.format(endpoint_uuid), { - 'name': endpoint_uuid, - }), - json_config_rule_delete( - '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( - network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', - }), - json_config_rule_delete( - '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', - }), - json_config_rule_delete( - '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { - 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', - }), - json_config_rule_delete( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { - 'policy_name': '{:s}_import'.format(network_instance_name), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - }), - json_config_rule_delete( - # pylint: 
disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { - 'policy_name': '{:s}_export'.format(network_instance_name), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - }), - json_config_rule_delete( - '/network_instance[{:s}]'.format(network_instance_name), { - 'name': network_instance_name - }), - ]) - self.__device_client.ConfigureDevice(Device(**json_device)) + json_config_rules = teardown_config_rules( + service_uuid, connection_uuid, device_uuid, endpoint_uuid, settings, endpoint_settings) + + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + for json_config_rule in json_config_rules: + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to DeleteEndpoint({:s})'.format(str(endpoint))) diff --git a/src/service/service/task_scheduler/ConnectionExpander.py b/src/service/service/task_scheduler/ConnectionExpander.py new file mode 100644 index 
000000000..39c91b1ba --- /dev/null +++ b/src/service/service/task_scheduler/ConnectionExpander.py @@ -0,0 +1,66 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Optional, Tuple +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.proto.context_pb2 import Connection, Empty, EndPointId, Link +from context.client.ContextClient import ContextClient + +class ConnectionExpander: + def __init__(self) -> None: + self.context_client = ContextClient() + self.endpointkey_to_link : Dict[Tuple[str, str], Link] = dict() + self.refresh_links() + + def refresh_links(self) -> None: + links = self.context_client.ListLinks(Empty()) + for link in links.links: + for link_endpoint_id in link.link_endpoint_ids: + device_uuid = link_endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = link_endpoint_id.endpoint_uuid.uuid + endpoint_key = (device_uuid, endpoint_uuid) + self.endpointkey_to_link[endpoint_key] = link + + def get_link_from_endpoint_id(self, endpoint_id : EndPointId, raise_if_not_found : bool = False) -> Optional[Link]: + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_key = (device_uuid, endpoint_uuid) + link = self.endpointkey_to_link.get(endpoint_key) + if link is None and raise_if_not_found: + str_endpoint_id = grpc_message_to_json_string(endpoint_id) + raise Exception('Link for 
Endpoint({:s}) not found'.format(str_endpoint_id)) + return link + + def get_links(self, connection : Connection) -> List[Link]: + path_links = list() + last_link_uuid = None + for endpoint_id in connection.path_hops_endpoint_ids: + link = self.get_link_from_endpoint_id(endpoint_id, raise_if_not_found=True) + link_uuid = link.link_id.link_uuid.uuid + if last_link_uuid is None or last_link_uuid != link_uuid: + path_links.append(link) + last_link_uuid = link_uuid + return path_links + + def get_endpoints_traversed(self, connection : Connection) -> List[EndPointId]: + path_endpoint_ids = list() + last_link_uuid = None + for endpoint_id in connection.path_hops_endpoint_ids: + link = self.get_link_from_endpoint_id(endpoint_id, raise_if_not_found=True) + link_uuid = link.link_id.link_uuid.uuid + if last_link_uuid is None or last_link_uuid != link_uuid: + for link_endpoint_id in link.link_endpoint_ids: + path_endpoint_ids.append(link_endpoint_id) + last_link_uuid = link_uuid + return path_endpoint_ids diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py new file mode 100644 index 000000000..416e1698f --- /dev/null +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -0,0 +1,142 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from enum import Enum +from typing import Any, Dict, Optional, Union +from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId +from common.rpc_method_wrapper.ServiceExceptions import NotFoundException +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory, get_service_handler_class +from service.service.tools.ContextGetters import get_connection, get_device, get_service +from service.service.tools.ObjectKeys import get_connection_key, get_device_key, get_service_key + +CacheableObject = Union[Connection, Device, Service] + +class CacheableObjectType(Enum): + CONNECTION = 'connection' + DEVICE = 'device' + SERVICE = 'service' + +class TaskExecutor: + def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None: + self._service_handler_factory = service_handler_factory + self._context_client = ContextClient() + self._device_client = DeviceClient() + self._grpc_objects_cache : Dict[str, CacheableObject] = dict() + + @property + def service_handler_factory(self) -> ServiceHandlerFactory: return self._service_handler_factory + + # ----- Common methods --------------------------------------------------------------------------------------------- + + def _load_grpc_object(self, object_type : CacheableObjectType, object_key : str) -> Optional[CacheableObject]: + object_key = '{:s}:{:s}'.format(object_type.value, object_key) + return self._grpc_objects_cache.get(object_key) + + def _store_grpc_object(self, object_type : CacheableObjectType, object_key : str, grpc_object) -> None: + object_key = '{:s}:{:s}'.format(object_type.value, object_key) + self._grpc_objects_cache[object_key] = grpc_object + + def _delete_grpc_object(self, object_type : CacheableObjectType, object_key : str) -> None: 
+ object_key = '{:s}:{:s}'.format(object_type.value, object_key) + self._grpc_objects_cache.pop(object_key, None) + + def _store_editable_grpc_object( + self, object_type : CacheableObjectType, object_key : str, grpc_class, grpc_ro_object + ) -> Any: + grpc_rw_object = grpc_class() + grpc_rw_object.CopyFrom(grpc_ro_object) + self._store_grpc_object(object_type, object_key, grpc_rw_object) + return grpc_rw_object + + # ----- Connection-related methods --------------------------------------------------------------------------------- + + def get_connection(self, connection_id : ConnectionId) -> Connection: + connection_key = get_connection_key(connection_id) + connection = self._load_grpc_object(CacheableObjectType.CONNECTION, connection_key) + if connection is None: + connection = get_connection(self._context_client, connection_id) + if connection is None: raise NotFoundException('Connection', connection_key) + connection : Connection = self._store_editable_grpc_object( + CacheableObjectType.CONNECTION, connection_key, Connection, connection) + return connection + + def set_connection(self, connection : Connection) -> None: + connection_key = get_connection_key(connection.connection_id) + self._context_client.SetConnection(connection) + self._store_grpc_object(CacheableObjectType.CONNECTION, connection_key, connection) + + def delete_connection(self, connection_id : ConnectionId) -> None: + connection_key = get_connection_key(connection_id) + self._context_client.RemoveConnection(connection_id) + self._delete_grpc_object(CacheableObjectType.CONNECTION, connection_key) + + # ----- Device-related methods ------------------------------------------------------------------------------------- + + def get_device(self, device_id : DeviceId) -> Device: + device_key = get_device_key(device_id) + device = self._load_grpc_object(CacheableObjectType.DEVICE, device_key) + if device is None: + device = get_device(self._context_client, device_id) + if device is None: raise 
NotFoundException('Device', device_key) + device : Device = self._store_editable_grpc_object( + CacheableObjectType.DEVICE, device_key, Device, device) + return device + + def configure_device(self, device : Device) -> None: + device_key = get_device_key(device.device_id) + self._device_client.ConfigureDevice(device) + self._store_grpc_object(CacheableObjectType.DEVICE, device_key, device) + + def get_devices_from_connection(self, connection : Connection) -> Dict[str, Device]: + devices = dict() + for endpoint_id in connection.path_hops_endpoint_ids: + device = self.get_device(endpoint_id.device_id) + device_uuid = endpoint_id.device_id.device_uuid.uuid + if device is None: raise Exception('Device({:s}) not found'.format(str(device_uuid))) + devices[device_uuid] = device + return devices + + # ----- Service-related methods ------------------------------------------------------------------------------------ + + def get_service(self, service_id : ServiceId) -> Service: + service_key = get_service_key(service_id) + service = self._load_grpc_object(CacheableObjectType.SERVICE, service_key) + if service is None: + service = get_service(self._context_client, service_id) + if service is None: raise NotFoundException('Service', service_key) + service : Service = self._store_editable_grpc_object( + CacheableObjectType.SERVICE, service_key, Service, service) + return service + + def set_service(self, service : Service) -> None: + service_key = get_service_key(service.service_id) + self._context_client.SetService(service) + self._store_grpc_object(CacheableObjectType.SERVICE, service_key, service) + + def delete_service(self, service_id : ServiceId) -> None: + service_key = get_service_key(service_id) + self._context_client.RemoveService(service_id) + self._delete_grpc_object(CacheableObjectType.SERVICE, service_key) + + # ----- Service Handler Factory ------------------------------------------------------------------------------------ + + def get_service_handler( + self, 
connection : Connection, service : Service, **service_handler_settings + ) -> _ServiceHandler: + connection_devices = self.get_devices_from_connection(connection) + service_handler_class = get_service_handler_class(self._service_handler_factory, service, connection_devices) + return service_handler_class(service, self, **service_handler_settings) diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py new file mode 100644 index 000000000..e5656bd0d --- /dev/null +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -0,0 +1,179 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import graphlib, logging, queue +from typing import Dict, Tuple +from common.proto.context_pb2 import Connection, ConnectionId, Service, ServiceId, ServiceStatusEnum +from common.proto.pathcomp_pb2 import PathCompReply +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory +from service.service.tools.ObjectKeys import get_connection_key, get_service_key +from .tasks._Task import _Task +from .tasks.Task_ConnectionConfigure import Task_ConnectionConfigure +from .tasks.Task_ConnectionDeconfigure import Task_ConnectionDeconfigure +from .tasks.Task_ServiceDelete import Task_ServiceDelete +from .tasks.Task_ServiceSetStatus import Task_ServiceSetStatus +from .TaskExecutor import CacheableObjectType, TaskExecutor + +LOGGER = logging.getLogger(__name__) + +class TasksScheduler: + def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None: + self._dag = graphlib.TopologicalSorter() + self._executor = TaskExecutor(service_handler_factory) + self._tasks : Dict[str, _Task] = dict() + self._context_client = ContextClient() + + # ----- Helper methods --------------------------------------------------------------------------------------------- + + def _add_task_if_not_exists(self, task : _Task) -> str: + task_key = task.key + if task_key not in self._tasks: + self._tasks[task_key] = task + return task_key + + def _add_connection_to_executor_cache(self, connection : Connection) -> None: + connection_key = get_connection_key(connection.connection_id) + self._executor._store_editable_grpc_object( + CacheableObjectType.CONNECTION, connection_key, Connection, connection) + + def _add_service_to_executor_cache(self, service : Service) -> None: + service_key = get_service_key(service.service_id) + self._executor._store_editable_grpc_object( + CacheableObjectType.SERVICE, service_key, Service, service) + + # 
----- Task & DAG composition methods ----------------------------------------------------------------------------- + + def _service_create(self, service_id : ServiceId) -> Tuple[str, str]: + service_planned_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PLANNED)) + + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE)) + + # activating a service requires the service is in planning state + self._dag.add(service_active_key, service_planned_key) + return service_planned_key, service_active_key + + def _service_remove(self, service_id : ServiceId) -> Tuple[str, str]: + service_removing_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL)) + + service_delete_key = self._add_task_if_not_exists(Task_ServiceDelete(self._executor, service_id)) + + # deleting a service requires the service is in removing state + self._dag.add(service_delete_key, service_removing_key) + return service_removing_key, service_delete_key + + def _connection_configure(self, connection_id : ConnectionId, service_id : ServiceId) -> str: + connection_configure_key = self._add_task_if_not_exists(Task_ConnectionConfigure( + self._executor, connection_id)) + + # the connection configuration depends on its connection's service being in planning state + service_planned_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PLANNED)) + self._dag.add(connection_configure_key, service_planned_key) + + # the connection's service depends on the connection configuration to transition to active state + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE)) + self._dag.add(service_active_key, connection_configure_key) 
+ + return connection_configure_key + + def _connection_deconfigure(self, connection_id : ConnectionId, service_id : ServiceId) -> str: + connection_deconfigure_key = self._add_task_if_not_exists(Task_ConnectionDeconfigure( + self._executor, connection_id)) + + # the connection deconfiguration depends on its connection's service being in removing state + service_pending_removal_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PENDING_REMOVAL)) + self._dag.add(connection_deconfigure_key, service_pending_removal_key) + + # the connection's service depends on the connection deconfiguration to transition to delete + service_delete_key = self._add_task_if_not_exists(Task_ServiceDelete( + self._executor, service_id)) + self._dag.add(service_delete_key, connection_deconfigure_key) + + return connection_deconfigure_key + + def compose_from_pathcompreply(self, pathcomp_reply : PathCompReply, is_delete : bool = False) -> None: + include_service = self._service_remove if is_delete else self._service_create + include_connection = self._connection_deconfigure if is_delete else self._connection_configure + + for service in pathcomp_reply.services: + include_service(service.service_id) + self._add_service_to_executor_cache(service) + + for connection in pathcomp_reply.connections: + connection_key = include_connection(connection.connection_id, connection.service_id) + self._add_connection_to_executor_cache(connection) + self._executor.get_service(connection.service_id) + for sub_service_id in connection.sub_service_ids: + _,service_key_done = include_service(sub_service_id) + self._executor.get_service(sub_service_id) + self._dag.add(connection_key, service_key_done) + + def compose_from_service(self, service : Service, is_delete : bool = False) -> None: + include_service = self._service_remove if is_delete else self._service_create + include_connection = self._connection_deconfigure if is_delete else 
self._connection_configure + + pending_items_to_explore = queue.Queue() + pending_items_to_explore.put(service) + + while not pending_items_to_explore.empty(): + item = pending_items_to_explore.get() + + if isinstance(item, Service): + include_service(item.service_id) + self._add_service_to_executor_cache(item) + connections = self._context_client.ListConnections(item.service_id) + for connection in connections.connections: + self._add_connection_to_executor_cache(connection) + pending_items_to_explore.put(connection) + + elif isinstance(item, ServiceId): + include_service(item) + self._executor.get_service(item) + connections = self._context_client.ListConnections(item) + for connection in connections.connections: + self._add_connection_to_executor_cache(connection) + pending_items_to_explore.put(connection) + + elif isinstance(item, Connection): + connection_key = include_connection(item.connection_id, item.service_id) + self._add_connection_to_executor_cache(item) + self._executor.get_service(item.service_id) + pending_items_to_explore.put(item.service_id) + for sub_service_id in item.sub_service_ids: + _,service_key_done = include_service(sub_service_id) + self._executor.get_service(sub_service_id) + self._dag.add(connection_key, service_key_done) + + else: + MSG = 'Unsupported item {:s}({:s})' + raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item))) + + def execute_all(self, dry_run : bool = False) -> None: + ordered_task_keys = list(self._dag.static_order()) + LOGGER.info('ordered_task_keys={:s}'.format(str(ordered_task_keys))) + + results = [] + for task_key in ordered_task_keys: + task = self._tasks.get(task_key) + succeeded = True if dry_run else task.execute() + results.append(succeeded) + + return zip(ordered_task_keys, results) diff --git a/src/service/service/task_scheduler/__init__.py b/src/service/service/task_scheduler/__init__.py new file mode 100644 index 000000000..70bfa5118 --- /dev/null +++ b/src/service/service/task_scheduler/__init__.py @@ -0,0 +1,51 @@ +# 
Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# TaskScheduler is initialized with a PathComputation Reply or a Service, and it collects/identifies the sub-services, +# sub-connections, and operations associated to them. It discovers and takes care of the inter-dependencies among them, +# and produces an ordered list of tasks to be executed to implement the desired create/delete operation on the service. +# E.g., a service cannot be deleted if connections supporting that service still exist. If these connections are +# supported by sub-services, the connection needs to be torn down before destroying the services. +# +# Internally, it composes a Directed Acyclic Graph (DAG) of dependencies between tasks. Each task performs a specific +# operation on a connection or service. The DAG composition is based on information extracted from a PathComp reply +# and/or interrogating the Context component. +# +# Example: +# A B C +# *---L3---*---L3---* +# *--L0--* *--L0--* +# - L3 service between A and C, depends on L3 connections A-B and B-C. +# - Each L3 connection is supported by an L0 service and its corresponding L0 connection. 
+# +# Dependency structure: +# service L3:A-C +# connection L3:A-B +# service L0:A-B +# connection L0:A-B +# connection L3:B-C +# service L0:B-C +# connection L0:B-C +# +# Resolution: +# - service.set(L3:A-C, state=PLANNING) +# - service.set(L0:A-B, state=PLANNING) +# - connection.configure(L0:A-B) +# - service.set(L0:A-B, state=ACTIVE) +# - connection.configure(L3:A-B) +# - service.set(L0:B-C, state=PLANNING) +# - connection.configure(L0:B-C) +# - service.set(L0:B-C, state=ACTIVE) +# - connection.configure(L3:B-C) +# - service.set(L3:A-C, state=ACTIVE) diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py new file mode 100644 index 000000000..ea9692142 --- /dev/null +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py @@ -0,0 +1,63 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common.proto.context_pb2 import ConnectionId +from common.rpc_method_wrapper.ServiceExceptions import OperationFailedException +from common.tools.grpc.Tools import grpc_message_to_json_string +from service.service.service_handler_api.Tools import check_errors_setendpoint +from service.service.task_scheduler.ConnectionExpander import ConnectionExpander +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from service.service.tools.EndpointIdFormatters import endpointids_to_raw +from service.service.tools.ObjectKeys import get_connection_key +from ._Task import _Task + +KEY_TEMPLATE = 'connection({connection_id:s}):configure' + +class Task_ConnectionConfigure(_Task): + def __init__(self, task_executor : TaskExecutor, connection_id : ConnectionId) -> None: + super().__init__(task_executor) + self._connection_id = connection_id + + @property + def connection_id(self) -> ConnectionId: return self._connection_id + + @staticmethod + def build_key(connection_id : ConnectionId) -> str: + str_connection_id = get_connection_key(connection_id) + return KEY_TEMPLATE.format(connection_id=str_connection_id) + + @property + def key(self) -> str: return self.build_key(self._connection_id) + + def execute(self) -> None: + connection = self._task_executor.get_connection(self._connection_id) + service = self._task_executor.get_service(connection.service_id) + + service_handler_settings = {} + service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings) + + connection_expander = ConnectionExpander() + traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection) + endpointids_to_set = endpointids_to_raw(traversed_endpoint_ids) + + connection_uuid = connection.connection_id.connection_uuid.uuid + results_setendpoint = service_handler.SetEndpoint(endpointids_to_set, connection_uuid=connection_uuid) + errors = check_errors_setendpoint(endpointids_to_set, results_setendpoint) + if len(errors) > 0: + 
MSG = 'SetEndpoint for Connection({:s}) from Service({:s})' + str_connection = grpc_message_to_json_string(connection) + str_service = grpc_message_to_json_string(service) + raise OperationFailedException(MSG.format(str_connection, str_service), extra_details=errors) + + self._task_executor.set_connection(connection) diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py new file mode 100644 index 000000000..fc849560e --- /dev/null +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py @@ -0,0 +1,63 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common.proto.context_pb2 import ConnectionId +from common.rpc_method_wrapper.ServiceExceptions import OperationFailedException +from common.tools.grpc.Tools import grpc_message_to_json_string +from service.service.service_handler_api.Tools import check_errors_deleteendpoint +from service.service.task_scheduler.ConnectionExpander import ConnectionExpander +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from service.service.tools.EndpointIdFormatters import endpointids_to_raw +from service.service.tools.ObjectKeys import get_connection_key +from ._Task import _Task + +KEY_TEMPLATE = 'connection({connection_id:s}):deconfigure' + +class Task_ConnectionDeconfigure(_Task): + def __init__(self, task_executor : TaskExecutor, connection_id : ConnectionId) -> None: + super().__init__(task_executor) + self._connection_id = connection_id + + @property + def connection_id(self) -> ConnectionId: return self._connection_id + + @staticmethod + def build_key(connection_id : ConnectionId) -> str: + str_connection_id = get_connection_key(connection_id) + return KEY_TEMPLATE.format(connection_id=str_connection_id) + + @property + def key(self) -> str: return self.build_key(self._connection_id) + + def execute(self) -> None: + connection = self._task_executor.get_connection(self._connection_id) + service = self._task_executor.get_service(connection.service_id) + + service_handler_settings = {} + service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings) + + connection_expander = ConnectionExpander() + traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection) + endpointids_to_delete = endpointids_to_raw(traversed_endpoint_ids) + + connection_uuid = connection.connection_id.connection_uuid.uuid + results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete, connection_uuid=connection_uuid) + errors = check_errors_deleteendpoint(endpointids_to_delete, 
results_deleteendpoint) + if len(errors) > 0: + MSG = 'DeleteEndpoint for Connection({:s}) from Service({:s})' + str_connection = grpc_message_to_json_string(connection) + str_service = grpc_message_to_json_string(service) + raise OperationFailedException(MSG.format(str_connection, str_service), extra_details=errors) + + self._task_executor.delete_connection(self._connection_id) diff --git a/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py b/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py new file mode 100644 index 000000000..15da1ffed --- /dev/null +++ b/src/service/service/task_scheduler/tasks/Task_ServiceDelete.py @@ -0,0 +1,39 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common.proto.context_pb2 import ServiceId +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from service.service.tools.ObjectKeys import get_service_key +from ._Task import _Task + +KEY_TEMPLATE = 'service({service_id:s}):delete' + +class Task_ServiceDelete(_Task): + def __init__(self, task_executor : TaskExecutor, service_id : ServiceId) -> None: + super().__init__(task_executor) + self._service_id = service_id + + @property + def service_id(self) -> ServiceId: return self._service_id + + @staticmethod + def build_key(service_id : ServiceId) -> str: + str_service_id = get_service_key(service_id) + return KEY_TEMPLATE.format(service_id=str_service_id) + + @property + def key(self) -> str: return self.build_key(self._service_id) + + def execute(self) -> None: + self._task_executor.delete_service(self._service_id) diff --git a/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py b/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py new file mode 100644 index 000000000..163954f1b --- /dev/null +++ b/src/service/service/task_scheduler/tasks/Task_ServiceSetStatus.py @@ -0,0 +1,46 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from service.service.tools.ObjectKeys import get_service_key +from ._Task import _Task + +KEY_TEMPLATE = 'service({service_id:s}):set_status({new_status:s})' + +class Task_ServiceSetStatus(_Task): + def __init__(self, task_executor : TaskExecutor, service_id : ServiceId, new_status : ServiceStatusEnum) -> None: + super().__init__(task_executor) + self._service_id = service_id + self._new_status = new_status + + @property + def service_id(self) -> ServiceId: return self._service_id + + @property + def new_status(self) -> ServiceStatusEnum: return self._new_status + + @staticmethod + def build_key(service_id : ServiceId, new_status : ServiceStatusEnum) -> str: + str_service_id = get_service_key(service_id) + str_new_status = ServiceStatusEnum.Name(new_status) + return KEY_TEMPLATE.format(service_id=str_service_id, new_status=str_new_status) + + @property + def key(self) -> str: return self.build_key(self._service_id, self._new_status) + + def execute(self) -> None: + service = self._task_executor.get_service(self._service_id) + service.service_status.service_status = self._new_status + self._task_executor.set_service(service) diff --git a/src/service/service/task_scheduler/tasks/_Task.py b/src/service/service/task_scheduler/tasks/_Task.py new file mode 100644 index 000000000..c36f92973 --- /dev/null +++ b/src/service/service/task_scheduler/tasks/_Task.py @@ -0,0 +1,30 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +class _Task: + def __init__(self, task_executor : TaskExecutor) -> None: + self._task_executor = task_executor + + @staticmethod + def build_key() -> str: + raise NotImplementedError('Task:build_key() not implemented') + + @property + def key(self) -> str: + raise NotImplementedError('Task:key() not implemented') + + def execute(self) -> bool: + raise NotImplementedError('Task:execute() not implemented') diff --git a/src/service/service/task_scheduler/tasks/__init__.py b/src/service/service/task_scheduler/tasks/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/service/service/task_scheduler/tasks/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/service/service/tools/ContextGetters.py b/src/service/service/tools/ContextGetters.py new file mode 100644 index 000000000..79ccf956b --- /dev/null +++ b/src/service/service/tools/ContextGetters.py @@ -0,0 +1,42 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc +from typing import Optional +from common.proto.context_pb2 import Connection, ConnectionId, Device, DeviceId, Service, ServiceId +from context.client.ContextClient import ContextClient + +def get_connection(context_client : ContextClient, connection_id : ConnectionId) -> Optional[Connection]: + try: + connection : Connection = context_client.GetConnection(connection_id) + return connection + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + return None + +def get_device(context_client : ContextClient, device_id : DeviceId) -> Optional[Device]: + try: + device : Device = context_client.GetDevice(device_id) + return device + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + return None + +def get_service(context_client : ContextClient, service_id : ServiceId) -> Optional[Service]: + try: + service : Service = context_client.GetService(service_id) + return service + except grpc.RpcError as e: + if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + return None diff --git 
a/src/service/service/tools/EndpointIdFormatters.py b/src/service/service/tools/EndpointIdFormatters.py new file mode 100644 index 000000000..2435df42c --- /dev/null +++ b/src/service/service/tools/EndpointIdFormatters.py @@ -0,0 +1,27 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import List, Optional, Tuple +from common.proto.context_pb2 import EndPointId + +def endpointids_to_raw(traversed_endpoint_ids : List[EndPointId]) -> List[Tuple[str, str, Optional[str]]]: + raw_endpoint_ids : List[Tuple[str, str, Optional[str]]] = [] + for endpoint_id in traversed_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + if len(topology_uuid) == 0: topology_uuid = None + endpoint_id_tuple = device_uuid, endpoint_uuid, topology_uuid + raw_endpoint_ids.append(endpoint_id_tuple) + return raw_endpoint_ids diff --git a/src/service/service/tools/ObjectKeys.py b/src/service/service/tools/ObjectKeys.py new file mode 100644 index 000000000..e58d8bd3e --- /dev/null +++ b/src/service/service/tools/ObjectKeys.py @@ -0,0 +1,26 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from common.proto.context_pb2 import ConnectionId, DeviceId, ServiceId + +def get_connection_key(connection_id : ConnectionId) -> str: + return connection_id.connection_uuid.uuid + +def get_device_key(device_id : DeviceId) -> str: + return device_id.device_uuid.uuid + +def get_service_key(service_id : ServiceId) -> str: + context_uuid = service_id.context_id.context_uuid.uuid + service_uuid = service_id.service_uuid.uuid + return '{:s}/{:s}'.format(context_uuid, service_uuid) diff --git a/src/service/service/tools/__init__.py b/src/service/service/tools/__init__.py new file mode 100644 index 000000000..70a332512 --- /dev/null +++ b/src/service/service/tools/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/service/tests/test_unitary_dependency_resolver.py b/src/service/tests/test_unitary_task_scheduler.py similarity index 65% rename from src/service/tests/test_unitary_dependency_resolver.py rename to src/service/tests/test_unitary_task_scheduler.py index 1dd70ba7e..020386d76 100644 --- a/src/service/tests/test_unitary_dependency_resolver.py +++ b/src/service/tests/test_unitary_task_scheduler.py @@ -12,16 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, operator -from common.proto.context_pb2 import Connection, Service +import logging +#from common.proto.context_pb2 import Connection, Service from common.proto.pathcomp_pb2 import PathCompReply from common.tools.grpc.Tools import grpc_message_to_json_string -from service.service.DependencyResolver import resolve_dependencies +from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory +from service.service.task_scheduler.TaskScheduler import TasksScheduler +from .PrepareTestScenario import context_client # pylint: disable=unused-import LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -def test_dependency_resolver(): +def test_task_scheduler(): # test: add services and connections that depend on each other # then, check if they are properly resolved. 
# - service MAIN, depends on connection PKT-1, TAPI, and PKT-2 @@ -68,31 +70,27 @@ def test_dependency_resolver(): connection_tapi.sub_service_ids.append(service_tapi2.service_id) LOGGER.info('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) - resolution = resolve_dependencies(pathcomp_reply) - LOGGER.info('resolution={:s}'.format(str(list(map(operator.itemgetter(0), resolution))))) - - CORRECT_RESOLUTION_KEYS = [ - ('connection', 'PKT-1' ), - ('connection', 'PKT-2' ), - ('connection', 'TAPI-1' ), - ('connection', 'TAPI-2' ), - ('service' , 'admin/TAPI-1'), - ('service' , 'admin/TAPI-2'), - ('connection', 'TAPI' ), - ('service' , 'admin/MAIN' ), + + service_handler_factory = ServiceHandlerFactory([]) + task_scheduler = TasksScheduler(service_handler_factory) + task_scheduler.compose_from_pathcompreply(pathcomp_reply) + tasks_and_results = list(task_scheduler.execute_all(dry_run=True)) + + LOGGER.info('tasks_and_results={:s}'.format(str(tasks_and_results))) + + CORRECT_ORDERED_TASK_KEYS = [ + 'service(admin/MAIN):set_status(SERVICESTATUS_PLANNED)', + 'service(admin/TAPI-1):set_status(SERVICESTATUS_PLANNED)', + 'service(admin/TAPI-2):set_status(SERVICESTATUS_PLANNED)', + 'connection(PKT-1):configure', + 'connection(PKT-2):configure', + 'connection(TAPI-1):configure', + 'connection(TAPI-2):configure', + 'service(admin/TAPI-1):set_status(SERVICESTATUS_ACTIVE)', + 'service(admin/TAPI-2):set_status(SERVICESTATUS_ACTIVE)', + 'connection(TAPI):configure', + 'service(admin/MAIN):set_status(SERVICESTATUS_ACTIVE)' ] - for (resolved_key,(resolved_objid, resolved_obj)),correct_key in zip(resolution, CORRECT_RESOLUTION_KEYS): - assert resolved_key == correct_key - assert resolved_obj is not None - if resolved_key[0] == 'connection': - assert isinstance(resolved_obj, Connection) - assert resolved_objid == resolved_obj.connection_id - connection_key = resolved_obj.connection_id.connection_uuid.uuid - assert resolved_key[1] == connection_key - elif 
resolved_key[0] == 'service': - assert isinstance(resolved_obj, Service) - assert resolved_objid == resolved_obj.service_id - context_uuid = resolved_obj.service_id.context_id.context_uuid.uuid - service_uuid = resolved_obj.service_id.service_uuid.uuid - service_key = '/'.join([context_uuid, service_uuid]) - assert resolved_key[1] == service_key + + for (task_key,_),correct_key in zip(tasks_and_results, CORRECT_ORDERED_TASK_KEYS): + assert task_key == correct_key -- GitLab From af8fd343579836ee52512510ef3db9e960d75c7f Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Sun, 11 Sep 2022 16:41:38 +0000 Subject: [PATCH 27/91] ECOC'22 test: - Renamed OldBigNet scenario to BigNet - Updated JSON descriptor builder scripts - Generated descriptor files for scenario - Updated definition of functional tests --- .../ecoc22/descriptors_emulated-BigNet.json | 1299 +++++++++++++++++ .../descriptors_emulated-DC_CSGW_TN.json | 1005 +++++++++++++ .../descriptors_emulated-DC_CSGW_TN_OLS.json | 985 +++++++++++++ src/tests/ecoc22/tests/BuildDescriptors.py | 48 +- src/tests/ecoc22/tests/Fixtures.py | 2 + ...Objects_OldBigNet.py => Objects_BigNet.py} | 1 - src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py | 18 +- .../ecoc22/tests/Objects_DC_CSGW_TN_OLS.py | 25 +- .../ecoc22/tests/test_functional_bootstrap.py | 5 +- .../ecoc22/tests/test_functional_cleanup.py | 3 +- .../tests/test_functional_create_service.py | 4 + .../tests/test_functional_delete_service.py | 9 +- 12 files changed, 3375 insertions(+), 29 deletions(-) create mode 100644 src/tests/ecoc22/descriptors_emulated-BigNet.json create mode 100644 src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json create mode 100644 src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json rename src/tests/ecoc22/tests/{Objects_OldBigNet.py => Objects_BigNet.py} (99%) diff --git a/src/tests/ecoc22/descriptors_emulated-BigNet.json b/src/tests/ecoc22/descriptors_emulated-BigNet.json new file mode 100644 index 000000000..cd0382694 --- /dev/null +++ 
b/src/tests/ecoc22/descriptors_emulated-BigNet.json @@ -0,0 +1,1299 @@ +{ + "contexts": [ + { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "service_ids": [], + "topology_ids": [] + } + ], + "devices": [ + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CE1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CE2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], 
+ "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CE3" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CE4" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "PE1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", 
\"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "PE2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "PE3" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "PE4" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + 
"resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + 
"action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB6" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/4\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/5\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/6\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": 
[], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB3" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB5" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "BB4" + } + }, + 
"device_operational_status": 1, + "device_type": "emu-packet-router" + } + ], + "links": [ + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CE1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "PE1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CE1/1/1==CE1/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CE2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "PE2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CE2/1/1==CE2/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CE3" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "PE3" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CE3/1/1==CE3/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CE4" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "PE4" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CE4/1/1==CE4/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE1" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "PE1/2/1==PE1/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE1" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + 
"link_id": { + "link_uuid": { + "uuid": "PE1/2/2==PE1/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE2" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "PE2/2/1==PE2/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE2" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "PE2/2/2==PE2/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE3" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB5" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "PE3/2/2==PE3/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE3" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB4" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "PE3/2/1==PE3/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE4" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB5" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "PE4/2/2==PE4/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "PE4" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB4" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + 
"link_uuid": { + "uuid": "PE4/2/1==PE4/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB1" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB2" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB1/2/1==BB1/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB2" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB3" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB2/2/1==BB2/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB3" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB4" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB3/2/1==BB3/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB4" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB5" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB4/2/1==BB4/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB5" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB6" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB5/2/1==BB5/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB6" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB1" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": 
"BB6/2/1==BB6/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB1" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB1/2/3==BB1/2/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB2" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB2/2/3==BB2/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB3" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB3/2/3==BB3/2/3" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB4" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "endpoint_uuid": { + "uuid": "2/4" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB4/2/3==BB4/2/4" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB5" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "endpoint_uuid": { + "uuid": "2/5" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB5/2/3==BB5/2/5" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "BB6" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "BB7" + } + }, + "endpoint_uuid": { + "uuid": "2/6" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "BB6/2/3==BB6/2/6" + } + 
} + } + ], + "topologies": [ + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "admin" + } + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json new file mode 100644 index 000000000..5f40edac2 --- /dev/null +++ b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json @@ -0,0 +1,1005 @@ +{ + "contexts": [ + { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "service_ids": [], + "topology_ids": [] + } + ], + "devices": [ + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "DC1-GW" + } + }, + "device_operational_status": 1, + "device_type": "emu-datacenter" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": 
\"int\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "DC2-GW" + } + }, + "device_operational_status": 1, + "device_type": "emu-datacenter" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": 
"_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, 
{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + 
"uuid": "TN-R3" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/3\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + } + ], + "links": [ + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC1-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC1-GW/eth1==CS1-GW1/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC1-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC1-GW/eth2==CS1-GW2/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC2-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { 
+ "link_uuid": { + "uuid": "DC2-GW/eth1==CS2-GW1/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC2-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC2-GW/eth2==CS2-GW2/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW1/1/1==TN-R1/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW2/1/1==TN-R2/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW1/1/2==TN-R2/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW2/1/2==TN-R1/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + 
"endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW1/1/1==TN-R3/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW2/1/1==TN-R4/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW1/1/2==TN-R4/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW2/1/2==TN-R3/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R1/2/1==TN-R2/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R2/2/1==TN-R3/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + 
"device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R3/2/1==TN-R4/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "2/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R4/2/1==TN-R1/2/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R1/2/3==TN-R3/2/3" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "2/3" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R2/2/3==TN-R4/2/3" + } + } + } + ], + "topologies": [ + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "admin" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "DC1" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "DC2" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "CS1" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + 
"context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "CS2" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "TN" + } + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json new file mode 100644 index 000000000..8d8e6fde3 --- /dev/null +++ b/src/tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json @@ -0,0 +1,985 @@ +{ + "contexts": [ + { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "service_ids": [], + "topology_ids": [] + } + ], + "devices": [ + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "DC1-GW" + } + }, + "device_operational_status": 1, + "device_type": "emu-datacenter" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"eth1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": 
\"eth2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"int\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "DC2-GW" + } + }, + "device_operational_status": 1, + "device_type": "emu-datacenter" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" 
+ } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"10/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}" + } + } + ] + }, + 
"device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" 
+ } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/1\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"1/2\"}, {\"sample_types\": [], \"type\": \"copper\", \"uuid\": \"2/1\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "device_operational_status": 1, + "device_type": "emu-packet-router" + }, + { + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "127.0.0.1" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "0" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": "{\"endpoints\": [{\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"a3adcbbcc03f\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"9329780033f5\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"e8a127ea3ed1\"}, {\"sample_types\": [], \"type\": \"optical\", \"uuid\": \"ef1c58823a49\"}]}" + } + } + ] + }, + "device_drivers": [ + 0 + ], + "device_endpoints": [], + "device_id": { + "device_uuid": { + "uuid": "TN-OLS" + } + }, + "device_operational_status": 1, + "device_type": "emu-open-line-system" + } + ], + "links": [ + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC1-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC1-GW/eth1==CS1-GW1/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC1-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + 
"endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC1-GW/eth2==CS1-GW2/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC2-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC2-GW/eth1==CS2-GW1/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "DC2-GW" + } + }, + "endpoint_uuid": { + "uuid": "eth2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "endpoint_uuid": { + "uuid": "10/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "DC2-GW/eth2==CS2-GW2/10/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW1/1/1==TN-R1/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW2/1/1==TN-R2/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW1/1/2==TN-R2/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS1-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + 
"device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS1-GW2/1/2==TN-R1/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW1/1/1==TN-R3/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "1/1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW2/1/1==TN-R4/1/1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW1" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW1/1/2==TN-R4/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "CS2-GW2" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "1/2" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "CS2-GW2/1/2==TN-R3/1/2" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R1" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-OLS" + } + }, + "endpoint_uuid": { + "uuid": "a3adcbbcc03f" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R1/2/1==TN-OLS/a3adcbbcc03f" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R2" 
+ } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-OLS" + } + }, + "endpoint_uuid": { + "uuid": "9329780033f5" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R2/2/1==TN-OLS/9329780033f5" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R3" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-OLS" + } + }, + "endpoint_uuid": { + "uuid": "e8a127ea3ed1" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R3/2/1==TN-OLS/e8a127ea3ed1" + } + } + }, + { + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "TN-R4" + } + }, + "endpoint_uuid": { + "uuid": "2/1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "TN-OLS" + } + }, + "endpoint_uuid": { + "uuid": "ef1c58823a49" + } + } + ], + "link_id": { + "link_uuid": { + "uuid": "TN-R4/2/1==TN-OLS/ef1c58823a49" + } + } + } + ], + "topologies": [ + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "admin" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "DC1" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "DC2" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "CS1" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": "admin" + } + }, + "topology_uuid": { + "uuid": "CS2" + } + } + }, + { + "device_ids": [], + "link_ids": [], + "topology_id": { + "context_id": { + "context_uuid": { + "uuid": 
"admin" + } + }, + "topology_uuid": { + "uuid": "TN" + } + } + } + ] +} \ No newline at end of file diff --git a/src/tests/ecoc22/tests/BuildDescriptors.py b/src/tests/ecoc22/tests/BuildDescriptors.py index fab6f2ceb..b0075c063 100644 --- a/src/tests/ecoc22/tests/BuildDescriptors.py +++ b/src/tests/ecoc22/tests/BuildDescriptors.py @@ -12,15 +12,51 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy, json, sys -from .Objects_OldBigNet import CONTEXTS, DEVICES, LINKS, TOPOLOGIES +# Execution: +# $ cd src +# $ python -m tests.ecoc22.tests.BuildDescriptors dc-csgw-tn +# $ python -m tests.ecoc22.tests.BuildDescriptors dc-csgw-tn-ols +# $ python -m tests.ecoc22.tests.BuildDescriptors bignet + +import copy, json, os, sys +from enum import Enum +from typing import Dict, Tuple + +class Scenario(Enum): + BIGNET = 'bignet' + DC_CSGW_TN = 'dc-csgw-tn' + DC_CSGW_TN_OLS = 'dc-csgw-tn-ols' + +scenario = None if len(sys.argv) < 2 else sys.argv[1].lower() + +if scenario == Scenario.BIGNET.value: + from .Objects_BigNet import CONTEXTS, DEVICES, LINKS, TOPOLOGIES + FILENAME = 'tests/ecoc22/descriptors_emulated-BigNet.json' +elif scenario == Scenario.DC_CSGW_TN.value: + os.environ['ADD_CONNECT_RULES_TO_DEVICES'] = 'TRUE' + from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, TOPOLOGIES + FILENAME = 'tests/ecoc22/descriptors_emulated-DC_CSGW_TN.json' +elif scenario == Scenario.DC_CSGW_TN_OLS.value: + os.environ['ADD_CONNECT_RULES_TO_DEVICES'] = 'TRUE' + from .Objects_DC_CSGW_TN_OLS import CONTEXTS, DEVICES, LINKS, TOPOLOGIES + FILENAME = 'tests/ecoc22/descriptors_emulated-DC_CSGW_TN_OLS.json' +else: + scenarios = str([s.value for s in Scenario]) + raise Exception('Unsupported Scenario({:s}), choices are: {:s}'.format(scenario, scenarios)) def main(): - with open('tests/ecoc22/descriptors_emulated.json', 'w', encoding='UTF-8') as f: + with open(FILENAME, 'w', encoding='UTF-8') as f: devices = [] - for 
device,connect_rules in DEVICES: + for item in DEVICES: + if isinstance(item, Dict): + device = item + elif isinstance(item, Tuple) and len(item) == 2: + device,connect_rules = item + else: + raise Exception('Wrongly formatted item: {:s}'.format(str(item))) device = copy.deepcopy(device) - device['device_config']['config_rules'].extend(connect_rules) + if len(item) == 2: + device['device_config']['config_rules'].extend(connect_rules) devices.append(device) f.write(json.dumps({ @@ -28,7 +64,7 @@ def main(): 'topologies': TOPOLOGIES, 'devices': devices, 'links': LINKS - })) + }, sort_keys=True, indent=4)) return 0 if __name__ == '__main__': diff --git a/src/tests/ecoc22/tests/Fixtures.py b/src/tests/ecoc22/tests/Fixtures.py index 5c5fd26e0..70b41bdcb 100644 --- a/src/tests/ecoc22/tests/Fixtures.py +++ b/src/tests/ecoc22/tests/Fixtures.py @@ -3,7 +3,9 @@ from common.Settings import get_setting from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient +#from .Objects_BigNet import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME from .Objects_DC_CSGW_TN import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME +#from .Objects_DC_CSGW_TN_OLS import WIM_MAPPING, WIM_PASSWORD, WIM_USERNAME @pytest.fixture(scope='session') def context_client(): diff --git a/src/tests/ecoc22/tests/Objects_OldBigNet.py b/src/tests/ecoc22/tests/Objects_BigNet.py similarity index 99% rename from src/tests/ecoc22/tests/Objects_OldBigNet.py rename to src/tests/ecoc22/tests/Objects_BigNet.py index 33a6ad052..592376ff9 100644 --- a/src/tests/ecoc22/tests/Objects_OldBigNet.py +++ b/src/tests/ecoc22/tests/Objects_BigNet.py @@ -20,7 +20,6 @@ from common.tools.object_factory.Device import ( from common.tools.object_factory.Topology import json_topology, json_topology_id from .Tools import compose_bearer, compose_service_endpoint_id, json_endpoint_ids, link - # ----- Context 
-------------------------------------------------------------------------------------------------------- CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID) CONTEXT = json_context(DEFAULT_CONTEXT_UUID) diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py index 642b12b58..e5504bbf9 100644 --- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( @@ -22,21 +23,26 @@ from common.tools.object_factory.Link import get_link_uuid, json_link, json_link from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned from common.tools.object_factory.Topology import json_topology, json_topology_id -def compose_router(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): +# if true, Device component is present and will infeer the endpoints from connect-rules +# if false, Device component is not present and device objects must contain preconfigured endpoints +ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'False') +ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'} + +def compose_router(device_uuid, endpoint_uuids, topology_id=None): device_id = json_device_id(device_uuid) r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] - config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else [] endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) - 
j_endpoints = [] if with_connect_rules else endpoints + j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) return device_id, endpoints, device -def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): +def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None): device_id = json_device_id(device_uuid) r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] - config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else [] endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) - j_endpoints = [] if with_connect_rules else endpoints + j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) return device_id, endpoints, device diff --git a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py index 5b1d01c30..9d67b1a41 100644 --- a/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py +++ b/src/tests/ecoc22/tests/Objects_DC_CSGW_TN_OLS.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import uuid +import os, uuid from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID from common.tools.object_factory.Context import json_context, json_context_id from common.tools.object_factory.Device import ( @@ -23,30 +23,35 @@ from common.tools.object_factory.Link import get_link_uuid, json_link, json_link from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned from common.tools.object_factory.Topology import json_topology, json_topology_id -def compose_router(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): +# if true, Device component is present and will infeer the endpoints from connect-rules +# if false, Device component is not present and device objects must contain preconfigured endpoints +ADD_CONNECT_RULES_TO_DEVICES = os.environ.get('ADD_CONNECT_RULES_TO_DEVICES', 'False') +ADD_CONNECT_RULES_TO_DEVICES = ADD_CONNECT_RULES_TO_DEVICES.upper() in {'T', 'TRUE', '1', 'Y', 'YES'} + +def compose_router(device_uuid, endpoint_uuids, topology_id=None): device_id = json_device_id(device_uuid) r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] - config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else [] endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) - j_endpoints = [] if with_connect_rules else endpoints + j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints device = json_device_emulated_packet_router_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) return device_id, endpoints, device -def compose_ols(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): +def compose_ols(device_uuid, endpoint_uuids, topology_id=None): device_id = json_device_id(device_uuid) r_endpoints = [(endpoint_uuid, 'optical', []) for endpoint_uuid in endpoint_uuids] - 
config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else [] endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) - j_endpoints = [] if with_connect_rules else endpoints + j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints device = json_device_emulated_tapi_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) return device_id, endpoints, device -def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None, with_connect_rules=True): +def compose_datacenter(device_uuid, endpoint_uuids, topology_id=None): device_id = json_device_id(device_uuid) r_endpoints = [(endpoint_uuid, 'copper', []) for endpoint_uuid in endpoint_uuids] - config_rules = json_device_emulated_connect_rules(r_endpoints) if with_connect_rules else [] + config_rules = json_device_emulated_connect_rules(r_endpoints) if ADD_CONNECT_RULES_TO_DEVICES else [] endpoints = json_endpoints(device_id, r_endpoints, topology_id=topology_id) - j_endpoints = [] if with_connect_rules else endpoints + j_endpoints = [] if ADD_CONNECT_RULES_TO_DEVICES else endpoints device = json_device_emulated_datacenter_disabled(device_uuid, config_rules=config_rules, endpoints=j_endpoints) return device_id, endpoints, device diff --git a/src/tests/ecoc22/tests/test_functional_bootstrap.py b/src/tests/ecoc22/tests/test_functional_bootstrap.py index 2e5e0daf8..defc9a809 100644 --- a/src/tests/ecoc22/tests/test_functional_bootstrap.py +++ b/src/tests/ecoc22/tests/test_functional_bootstrap.py @@ -17,8 +17,9 @@ from common.proto.context_pb2 import Context, ContextId, Device, DeviceId, Empty from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from .Fixtures import context_client, device_client -#from .Objects_OldBigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES -from 
.Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, TOPOLOGIES +#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES LOGGER = logging.getLogger(__name__) diff --git a/src/tests/ecoc22/tests/test_functional_cleanup.py b/src/tests/ecoc22/tests/test_functional_cleanup.py index 8c7ef7885..81181db1c 100644 --- a/src/tests/ecoc22/tests/test_functional_cleanup.py +++ b/src/tests/ecoc22/tests/test_functional_cleanup.py @@ -18,8 +18,9 @@ from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from .Fixtures import context_client, device_client -#from .Objects_OldBigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +#from .Objects_BigNet import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES from .Objects_DC_CSGW_TN import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES +#from .Objects_DC_CSGW_TN_OLS import CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES LOGGER = logging.getLogger(__name__) diff --git a/src/tests/ecoc22/tests/test_functional_create_service.py b/src/tests/ecoc22/tests/test_functional_create_service.py index ed914bb69..714bb98ab 100644 --- a/src/tests/ecoc22/tests/test_functional_create_service.py +++ b/src/tests/ecoc22/tests/test_functional_create_service.py @@ -18,8 +18,12 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from .Fixtures import context_client, osm_wim +#from .Objects_BigNet import ( +# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) from .Objects_DC_CSGW_TN import ( CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, 
WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) +#from .Objects_DC_CSGW_TN_OLS import ( +# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) diff --git a/src/tests/ecoc22/tests/test_functional_delete_service.py b/src/tests/ecoc22/tests/test_functional_delete_service.py index c24d70185..c4899d8ba 100644 --- a/src/tests/ecoc22/tests/test_functional_delete_service.py +++ b/src/tests/ecoc22/tests/test_functional_delete_service.py @@ -24,9 +24,12 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from compute.tests.mock_osm.MockOSM import MockOSM from context.client.ContextClient import ContextClient from context.client.EventsCollector import EventsCollector -from .Objects_OldBigNet import ( - CONTEXT_ID, CONTEXTS, DEVICE_O1_UUID, DEVICE_R1_UUID, DEVICE_R3_UUID, DEVICES, LINKS, TOPOLOGIES, WIM_MAPPING, - WIM_PASSWORD, WIM_USERNAME) +#from .Objects_BigNet import ( +# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) +from .Objects_DC_CSGW_TN import ( + CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) +#from .Objects_DC_CSGW_TN_OLS import ( +# CONTEXT_ID, CONTEXTS, DEVICES, LINKS, TOPOLOGIES, WIM_SERVICE_CONNECTION_POINTS, WIM_SERVICE_TYPE) LOGGER = logging.getLogger(__name__) -- GitLab From 476fd7201081e0152b9ddb2be295c01d19133b14 Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Mon, 12 Sep 2022 08:23:36 +0200 Subject: [PATCH 28/91] L2-VPN --- .../openconfig/templates/Interfaces.py | 6 ++++++ .../interface/subinterface/edit_config.xml | 7 ++++--- .../network_instance/edit_config.xml | 21 +++++++++++++++++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/src/device/service/drivers/openconfig/templates/Interfaces.py b/src/device/service/drivers/openconfig/templates/Interfaces.py index da1bbd483..3f5b104f2 100644 --- 
a/src/device/service/drivers/openconfig/templates/Interfaces.py +++ b/src/device/service/drivers/openconfig/templates/Interfaces.py @@ -37,6 +37,10 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: #interface_type = xml_interface.find('oci:config/oci:type', namespaces=NAMESPACES) #add_value_from_tag(interface, 'type', interface_type) + interface_type = xml_interface.find('oci:config/oci:type', namespaces=NAMESPACES) + interface_type.text = interface_type.text.replace('ianaift:','') + add_value_from_tag(interface, 'type', interface_type) + interface_mtu = xml_interface.find('oci:config/oci:mtu', namespaces=NAMESPACES) add_value_from_tag(interface, 'mtu', interface_mtu, cast=int) @@ -50,6 +54,8 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: add_value_from_tag(subinterface, 'name', interface_name) add_value_from_tag(subinterface, 'mtu', interface_mtu) + add_value_from_tag(subinterface, 'type', interface_type) + subinterface_index = xml_subinterface.find('oci:index', namespaces=NAMESPACES) if subinterface_index is None or subinterface_index.text is None: continue diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index f172c1676..63a4f4f6c 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -4,8 +4,8 @@ {{name}} {{name}} - ianaift:l3ipvlan - {{mtu}} + ianaift:{{type}} + {% if mtu is defined %}{{mtu}}{% endif%} true @@ -24,6 +24,7 @@ + {% if address_ip is defined %} @@ -35,7 +36,7 @@ - + {% endif%} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml index 74424cea9..1944778c6 100644 --- 
a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml @@ -5,6 +5,7 @@ {{name}} oc-ni-types:{{type}} + {% if type=='L3VRF' %} {% if description is defined %}{{description}}{% endif %} {% if router_id is defined %}{{router_id}}{% endif %} {{route_distinguisher}} @@ -16,6 +17,26 @@ oc-ni-types:INSTANCE_LABEL + {% endif %} + {% if type=='L2VSI' %} + {% if description is defined %}{{description}}{% endif %} + false + 1500 + + + + oc-ni-types:MPLS + + + + + true + 1000 + 300 + + + {% endif %} + {% endif %} -- GitLab From 40f887def9193ffd0f346133947d76d1ecfb3e3e Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Mon, 12 Sep 2022 10:02:18 +0200 Subject: [PATCH 29/91] Virtual Circuit --- .../openconfig/templates/NetworkInstances.py | 20 ++++++++++++++ .../connection_point/edit_config.xml | 27 +++++++++++++++++++ 2 files changed, 47 insertions(+) create mode 100644 src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml diff --git a/src/device/service/drivers/openconfig/templates/NetworkInstances.py b/src/device/service/drivers/openconfig/templates/NetworkInstances.py index 0cd6bbfd3..8399402fa 100644 --- a/src/device/service/drivers/openconfig/templates/NetworkInstances.py +++ b/src/device/service/drivers/openconfig/templates/NetworkInstances.py @@ -27,6 +27,9 @@ XPATH_NI_IIP_AP = ".//ocni:inter-instance-policies/ocni:apply-policy" XPATH_NI_IIP_AP_IMPORT = ".//ocni:config/ocni:import-policy" XPATH_NI_IIP_AP_EXPORT = ".//ocni:config/ocni:export-policy" +XPATH_NI_CPOINTS = ".//ocni:connection-points/ocni:connection-point" +XPATH_NI_CPOINTS_ENDPOINT = ".//ocni:endpoints/ocni:endpoint/ocni:remote/ocni:config" + def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: response = [] for xml_network_instance in xml_data.xpath(XPATH_NETWORK_INSTANCES, namespaces=NAMESPACES): @@ -54,6 +57,20 @@ def 
parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: if len(network_instance) == 0: continue response.append(('/network_instance[{:s}]'.format(network_instance['name']), network_instance)) + for xml_cpoints in xml_network_instance.xpath(XPATH_NI_PROTOCOLS, namespaces=NAMESPACES): + cpoint = {} + add_value_from_tag(cpoint, 'name', ni_name) + + connection_point = xml_cpoints.find('ocni:connection-point-id', namespaces=NAMESPACES) + add_value_from_tag(cpoint, 'connection_point', connection_point) + + for xml_endpoint in xml_cpoints.xpath(XPATH_NI_CPOINTS_ENDPOINT, namespaces=NAMESPACES): + remote_system = xml_endpoint.find('ocni:remote-system', namespaces=NAMESPACES) + add_value_from_tag(cpoint, 'remote_system', remote_system) + + VC_ID = xml_endpoint.find('ocni:virtual-circuit-identifier', namespaces=NAMESPACES) + add_value_from_tag(cpoint, 'VC_ID', VC_ID) + for xml_protocol in xml_network_instance.xpath(XPATH_NI_PROTOCOLS, namespaces=NAMESPACES): #LOGGER.info('xml_protocol = {:s}'.format(str(ET.tostring(xml_protocol)))) @@ -128,4 +145,7 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: iip_ap['name'], iip_ap['export_policy']) response.append((resource_key, iip_ap)) + + + return response diff --git a/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml new file mode 100644 index 000000000..37887e9a9 --- /dev/null +++ b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml @@ -0,0 +1,27 @@ + + + ELAN-VC:789 + + {{connection_point}} + + {{connection_point}} + + + + {{connection_point}} + + {{connection_point}} + 1 + oc-ni-types:REMOTE + + + + {{VC_ID}} + {{remote_system}} + + + + + + + \ No newline at end of file -- GitLab From b46dfba78a12a0ac56bd9ca201752745163f49ee Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Tue, 13 Sep 2022 
09:47:00 +0200 Subject: [PATCH 30/91] Virrtual Circuit changes --- .../connection_point/edit_config.xml | 40 ++++++++++--------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml index 37887e9a9..d34f462db 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/connection_point/edit_config.xml @@ -1,27 +1,29 @@ - ELAN-VC:789 - - {{connection_point}} - + {{name}} + + {{connection_point}} - - - - {{connection_point}} - + + {{connection_point}} + + + {{connection_point}} - 1 - oc-ni-types:REMOTE - - - {{VC_ID}} - {{remote_system}} + {{connection_point}} + 1 + oc-ni-types:REMOTE - - - - + + + {{VC_ID}} + {{remote_system}} + + + + + + \ No newline at end of file -- GitLab From 209c6248ac5a96f8792fb72816b1b1fc0082a7d4 Mon Sep 17 00:00:00 2001 From: Lucie Long Date: Tue, 13 Sep 2022 10:48:48 +0000 Subject: [PATCH 31/91] WebUI improvement: Main page: - Change page title to "ETSI TeraFlowSDN Controller" - Add slogan "Open Source for Smart Networks and Services" next to the logo (on top) - Change Logo and Icon About page: - Change title to "ETSI TeraFlowSDN Controller" - Remove link "TeraFlow H2020 webpage" and add link "ETSI Open Source Group for TeraFlowSDN" pointing to: https://tfs.etsi.org/ - Remove partners logo Service detail page : - Show correct Service Type instead of integer - Show service status - Format configurations as in device details - Format constraints as in device details - Add constraints - Format endpoints and add device uuid Service page: -Remove constraints Link detail page: -Add device uuid to the endpoints table --- src/webui/service/service/routes.py | 3 +- ...Flow SDN Logo ScreenColour with Slogan.png | Bin 0 -> 
34434 bytes src/webui/service/templates/base.html | 297 +++++++++--------- src/webui/service/templates/link/detail.html | 10 +- src/webui/service/templates/main/about.html | 6 +- .../service/templates/service/detail.html | 258 +++++++++------ src/webui/service/templates/service/home.html | 10 +- 7 files changed, 317 insertions(+), 267 deletions(-) create mode 100644 src/webui/service/static/TeraFlow SDN Logo ScreenColour with Slogan.png diff --git a/src/webui/service/service/routes.py b/src/webui/service/service/routes.py index d62e28ca1..bc05daee3 100644 --- a/src/webui/service/service/routes.py +++ b/src/webui/service/service/routes.py @@ -79,7 +79,8 @@ def detail(service_uuid: str): flash('The system encountered an error and cannot show the details of this service.', 'warning') current_app.logger.exception(e) return redirect(url_for('service.home')) - return render_template('service/detail.html', service=response, connections=connections) + return render_template('service/detail.html', service=response, connections=connections,ste=ServiceTypeEnum, + sse=ServiceStatusEnum) @service.get('/delete') diff --git a/src/webui/service/static/TeraFlow SDN Logo ScreenColour with Slogan.png b/src/webui/service/static/TeraFlow SDN Logo ScreenColour with Slogan.png new file mode 100644 index 0000000000000000000000000000000000000000..218cc713c0a2704f96371fdd2916ef16b44cf667 GIT binary patch literal 34434 zcmdqJbySs6_b!UkB`w{lNF$w65`usd(ryrth7D|5I;0yZX%VDEIz<}k?(Rmq&f4Pl zo!>d*{&C0s^NxYYyLtE8?^Y63E~eq80eL6AcA? 
zFG-jz2LIi2P?LLxP}EDY0WOeCC0|G)Ae2U6UKt^SYjoS^+71W^WJ>UV_b8qSrhp+n znZMG4YQ0brg4kHG8yVZYHDQNY*@C+f5Jbdbwnh*O6DZAF6H{|*QRcncMrImwV^L)_ZwtjP?_Z}~rmk$nqBdCL8i zoA0S07cUpT;8Ow4|GwexFaOW`U)eZ9%-~ZIb%<5YN9I0#`zkj!-ijdod+Ph`9-kl8v>g z2>ho1ybNsP?-TxidJ15?|A)2w_bFgTw*SLUz->Z*_t(b$m5q(1n3R#VlaUC=|NZj+ zGC_bFl@O<( z5El;(7pD*pmk>A47@5i}0)mgUytL#inEp;Gsv%+PX*)H;tBrd3s7#i}AwEQf<0Xd3 zZnuw|ZXa&fYyNsLUgAAYM1+f6`{6#_i;zFF7HCGdM=sUD$xH0jiuB&60}J+Uwcmxm zf3MxEwOM_U-$WZ=L~{2fTbd1B9^%Amx%*-XNHpX5??v1H-!5V?D54q(eik>l9KDlQ zR2))K*%-{p(XF;u%T*$M^!jTo`%${=4SM?5l+8xViLxk0joW`Kr8+3DP+qzjfK9$! zIp-PUjfmRiJm6SzF!ix)2^M8nK=!XIsEmtqKq$k;MI}}furc# zUufrNOA#jP{TbX2t4expd&9Ths8lR@sZO7uyVKh5M+?o5ZT@WuV~U^5R6Lu`@R`@i zglUduu|BtHZ_1O5{x7QZ6IFIuS4Vx~(P?Syajcpe11I|}I9oP`KWV*Q-oi%pJmiH*mZ!Y&%LRgDlLJqQH zl-0PL_L~Dc;)f5*_-4?3V#@~Wvt53`(uWpTU?rxaFDPzWK7cky%0 zQhe-VHBn}Q$93|R!oH)qtQzW~U0EF)tIWpyJDD#&Mnz0s)C$rT8fCZCCO`7^^^A*< z04p`*^4`I2YjUd2-!l=qli(*kmHQ%j*f#(d(ld<7QTKUfH{j}rwEp>tfy-8)Aotrr zHSHf_dQMg^wNIuhzqtl@-kh5z@mYNecd!^Q$#5FtIYh}bKigk;{K3HM+A&|J(HkqZ zL^|#ET|nLGk?yx!qM{pTi&uI=l=h!reR(Mip>GexI`NPIfo;zh{f^do}#&sl}@gMx2XQSr*It!HO#lILzJaNvtMPZvuMsDkSxt z?(Zfw4>8(KIBm`9In7p=KG80d_PjpUB*L8KF?@7@v^7>d*2EzlP`6c7o1+0%bGx|J z{JY$&5Ds3{!{;{Sbh63FwF!N@oQcS>N^{2JCS<^nM4yBtaEYLaXMWaw^J)p!>&G` zX*5}^8;Sm;Hc9$1X=~xvM3wnR5obyAjsCh%x*o?PLb)Mk(LthEDn}KJsK+w=LvFj( zazfB`8ebF-mK;`%m?>1H*v_L-A@3{dvwo+?>5uzTYHNAFsmd1BpS*Ed3gd@b(q=~F zBVbGpt@?MA7_}@%h~M;^xDeq$GjhTfNDFuT9$Dg~Np1#lrNPA7BdCkJ@xRe`sgMgS z2GJYeUZ0NbS=O2j(3wwFe0!o-^UC#fmrYaO#bos2KNFLAmT7geJuz5rt}eJ7DYbmW z(GXi*fLLAEr)8=XA05gz!LOn?M>j@Y`7y=8mfmHzdX3i>UGEr8<1GV@tUwxH?i+#g zSoP%}p4Y~`U)k(0n>x#&$GMi7*n>P39loa9OmaW++4@d48J(v+YoVLN@?vN6i21Mb z)x1BxLn5Csz1_RLQ9;aEM!L+cZndz_&(9w$Gf^bvH2hF*J|vr9g+u2jL|3yG#y|T- zPsib_uh|!#UnletB6MTA4lgu3XeNQHp*}WF{^$M28-TmRlf4=06N_}uM~do?Rt~`i27&dH@OI^n&~30{~iSCsXCV< z<1szw*S?Wo{EnThnQ+{BWg61UUv&+Na+jr5HJ;sc<+v%vkeMtme&kRs{UCWh4vE+x zLuMnZaQ;%9?s*{}E_Puy;c+^XYIdXhczf=q56+PkqHmye4YGQ 
zSA$u%J<*NPZ^?i5<_!7_j7W|f&|BXtJjuf96Thu<8q3Clw1@x3KP+V_T$;|SO5xUQV8+i=Ur-!@-I zm;csbn8|s;9>)saMrB^t= zjT5XizKcNHRSXQ1>l$)jTtas|uP=nSI49A+V+?LxiLTs++py>D8W{A78k@RoEY?q( zN=;N5)&JQXQAA7}F3?nw@#P;5`*m>aZzi@^cf^~BJo`!e`%Rov?W-7K_DS_3=VPys z0x#)lMdBT{JZCc+6Ls}v;O7%MW0(uZw`S|zGDaJTAC^WlU+hh*J*i9R%)5%V)L%G` znP6vr8z-v&7!4x}>tD!Ph>kR)m-xk)pi5V~UG|Xx>-%!eR#6Ndy{v+^d<>7!aEwk3 zZ$d$Y^^+IxW8*f4OuKfRr!!{g&?W4*zE!>pQ{g`BtzJtjIGXvUTdmeBc4AVzX**BN1_3O=C-ahx}quw*_$e$+r$D7@xnDLhvkmQtk=TZ8; z6dIM2oxb2ZAcmLj*H&JNVZJ}#EX2Ms=XDz=8%EYy+D=tqv`)@%U3esWeSIoTDg+G* z_>yNq?X#1cY&~clg}lKwKCs{R)aaNrPj*a>iGOdp`n$4|^8SO>!ON}U#^r+IhQX4< z;q#r!IrcFFuX>c*4_2zT4K7@?!UH&@Lv40#n|!306?s=Z$ET-rUKYf9U6SWt5WA>t zEJ>!y*q=H7^h2+)+^av;=;La3Y`EMNz7DGqUGO6)YqJX2>ZHC6CFNNLZ%k%!_LQV&TG?h#FUkb8q3q}(GDKxDH;(b}JO=tJ%uAF>)3 zBh{KC9lRjDsb%FzQ*SJ-=`lvZ|6;UDq+Nf_u9X3mCVa=??b@;qi zldr~18gEF(XVDXz&~pt3GG$zgAkBxb3p9`u^j-7ysoNB=e#OMhP4}5c zpEj#9L|q2xJv>;wHTIn=U-Hx z8t-eQ$@`?T^vi$ODL2bl8g-g=F%<;fe8ubbGQHt<Y_g*f+B>;~vkXKbu=gnktmfwCE{{5GV{`6aZsv^jMSd)a-cvAZuJ8ep1Y7f^S5bGez6Q7A|07#kjCEXLn;yc8k+9@?Xt zCA)E|b=>9Br1Pt~3Ddost+E5eJl;QIkQ*{yv(Ne*1}dJng>&CcRgT$t#A0@HVg4!W zYB)T{4R2d9>@tG`Nq=esp z5iLD`ifSBTuRyme;vUwhA=nsio_?Q!EV->IuR2LjN;U2hvDrZ2wvg6R9F+STJltL| zr`SH@H0!6iW}6VHL~htt{(`J5lF#2k+Tl$=zB$m0xH=~Pa5Hu14Ek1M3)u9V+wbpw ztMx_p33Hqi^WF&HAD5T1Hww3i3+$8+4ZCUOD8$VJ*oK}mueSF)no)5W5}?^0%zw=H z!+J)@_SqAi*@i3S=Sas}V>l`67%7ITdxPM1Qunikz-C~(_yP;WJ?Y}&TUs7JbvEkl z)>57IIic(QP3Y8Cr^8Qo{s5TX4>6_`vG<(fl-V&q@`{;-Eny$OTZnVa6gvZ{kMG7% zUV&y<85SwGv}n@o60Dul+4v~(NIvsM&UL$tFb9eSA}1`XDp3#wb-eRcM2+A@z*=rBk?n0>iKhI9@(c z^m6Yg$+x1`AL>=xuYA_{_U3VT%jMA)ji>os5Z9)!NItdaQ3~z#y>k}oU_QC(M?7nLkx~z$u1}^^D#?YQUn&UV z+kLFi$?T7Q^DD+ZeqFX1Ze6#$k->HYa)3#Xh@u!}%Z-adDax3LTPU53ve>yX_MoKk z>l&J8%4>0R06s23b?l+@8 zwc(I$F)UH)%8L&Nyqly5@wK_07>e8opVMxdsh~x}&LD3Z+^gL$cPbB$#|+jIgOD!TslAMU z^hq1(wRzJOeK;_a_AquSIHLF!^ILvcZOTNDw)5m5{x>Ud;;lNt0ulvvl+JH_kf+>W48{9-+;<^@#e zH~MJnlAPX44Us4%he!h0bmC7$v>KnYKPCsq^7HtkOK168oUma58^^8D5d^_)0z284 
z6!VD!dCUGZ?OB22jGsgAuu&9`4BaH>xO*422y1Dmb?CcLg-|IGA`*{IhD=rb>s=5A zdbw$`4%&m-y^kR)F*=7+^&nSp?HMp>#?2Kbi=uMi*}p!BbRuM7&Ahj-kEb2U?u>S_ zUgXPwMA;_3=vU+itF`~CDJ^GRBAUi1;(^BP^#I~%zFcLGA6NQW?`4rR{-w&pP233j zom7tnlPoO35zMK1ZKMiDm+|l|WIHA2mo?1v7#_9D&Y3VZ9B8w#S6+&J$SAQYgSX@L z1q??pPx_fjgIweE9my8-A}XjtdLM;xDG-)LePX#!KKp0=FFHRvmf~K? zq;hfj@Ij`~?sxwKhi>03jEo*Zw1^yt@S$`hS1F$h7t=7uS}n0DtQYrFUF`guCe}5X zq+8=2mZ`FEgLnU&a^6d6a#@N_awt@WK>&3Ir3EF$0JTn1KA}p}z~i;+pY@EN0np9- zye=J^`kW(3X_^5)2cdsd$AJ-xdbk{wi*#+s!0)}1mIpBa>&O>XErukvFKqpia(;0? z9=D&iPDIyciahqWTM@2Q1Nj2L?Bx7hVgq9;k|UUFk#)}jg)`uSn#2=c0Rw65mS>;v zp2!OjIo!hUz08yxF1MM}m%;QFSY%ukwVbJOOq&#G$G7GQo;T3=<#X*|b$Ez`hUQlS zp$<)_N5JWCb1}DTx`(=6=Nf9!@*QPavQn`|jt*DYaWfWLedkP^q%mHVy9`J!zPOUY z5zW5$0QU*3?<|>JOFHNZK(gU=O!5g5Tas{S3jhlV4LQu%6ap_1pUppFMVF;&4+=yq zMk08~Ga(?$9`iQcaa3F9h0t#6z2i2mjC+VE>kNZXN{|MoGTk&RM=K6*Nmq)DVvo(7 zpdI_!^}z~L@MpW*!pJQ=mUenLZgl&_NQVN(%2m?E9?#+qQV=Zqdqk78$8D3y^omcN zeMvk}8(-2T{8if^pzqVre~f3=2Rr=2%(TWdkJjFO{F~d4e2j~#vDp{Z^oQNtkLaSj zx=5p$XxkTe#Gs`jhElo9J?{jh0ZE!bK=|p1%8(2mZ;q%#6850yZ+jGPZ?1n}rtq&L zAoh~mk?b(s)-#Yf$6`Vr()01Rc>Y-*AX~ioiOCVW&=P3jf`>%X9lGSU^J9$nV&VF9 zu2|N-YPN!zg`e(T=^u~>b_3-61%l_GAkCgVv(S9#vW$vEKJeyFJl*s$0Qs*SCCDr93~GtWs| z+G>LM>OLsZHaV)|7n|R42AR1MGUQ9HoR%93d`3T!CLG7~Rwhi-<8Z`Cma(=}mVG6C zvo(v?%DahY7;UOo9&XIkIx_^``cVhyjwgP+(N~*K(^*YGcdsUK=s*ba+*&VrrovAvUhRs!JPLemt(a8 zomvwMuYNWhgb&6aym#80)idDi|3(zSK}9Db^Ys02|1UAY(H+W7d-6rSb_w{Pt#n^PpGBJWEv!J(^<5WN%JJ$-C9*G z;R4ULtorItqtsIkTYx#LVwr4yLXX+dUp<5gaGl_m z2Aochp$4;OhgwMWbFGFQ1%GI9=)p`~eB*o<&053mJK*51rtP|s)Iek2dgd#{6?E(k zX?8dms3f3@r_rf+3BK_BRHoY%SNwTt(jDg2)*#b&S)TQwpM={u2*twCeH75d9Du|Q z>d~rGo~01fp}K&EjLtA;t#*IKCMA0QDgvm+F0D&A1OwlciW3k7x` zSHkbbEN7`=8h7|H9!@0sAMWR=ugEb^gIYmG)V%}4P-<~~Ks2vCDIN%KlcV;?UR?fK zP!yjPA@=4btyb2xfnQahnoF*Mv{5>sx@PP6H`d0{%oFYO3hml0ycEA8H$VTpn@Z+= z>w9T(KDsoP4g|U7sXn;H%aZjM31~soZS43+G}Lvt5*Lg5Y z+za4dD#v)dkl*~dx7cpI`}t|9)@7$fW-UpKKTmI9srXEdb4=KV&T`Da(aK!TR_p78 zQJoM)9bG&z%KDdWefl{lHfxaZ0U9=#Fz2UA;$}si3~XAAAc>GfQ|&s34*TBHTj5#P 
z&UqPB8t%8~&l)aQ<~&YQF?ts5AMAhSS`b*eCn)xdWQOWoxQ2p?X^{%R-S}iHJdJ^s zgJU1&RI}VrMUR74Vn@;;f-^^v?T-$f+`srGb#SB51Xpb_i!J^WA|<5B4GoX@CCR>O ze4{(ku#6YPta$ zzFK6v96t}!yet;?NnQ~FWNV1qc+Uamld%JVo8$t;^FtGv6X-#Xbv!X`LwZzDf1>7yYgr5@1%iy@+9^o z(JWybQ}6b1t4T;^|xb9D8pZU=exm1UjC0^TAei5pT7 zqk6w2c}a})t~r^#a9aC3?R_k}E=g=C3ctH5p4(EUQ8MYfmb@Qubv`Cw1Vw>=w zaN_%j;X57icr&*T7patnvSzsfplbU;dFIYw=Cm&y0=NdnjW;z;jbWOBH;mnqsFc}OI12%2 zkZl9rLOHQiVdv4KpHIJ!>ARU%{=Q0~|K|z9?&Dc8tadK42_5N#kzeNV=D${D-yYZM z%8if5V&>)?=jOgo#HdLz@M=@zQGs6HjZ-j-bC6i5@%

gw>}}#3*l#B(*&ZL$oec zm{j%Yws#tDZyt{~++46u&Rx$&X>$sC`>WEHc29IrBr z;*0c{{~WbT*{$^F+Q)i4qz>|)u&hL{NcO9g*N?j*vnL5?ytybKn}ALT zffDdj1Dg(>ligJ3Syh{#{>1u;zZGRPWmebT3fBn@v_Er}^?5{7Msg}uorV028%Cd2 z1o6A3-Z^CEo@;R9O>lZqpx|GYug3mVhD`XL`?D#b!RybdIZko*hbL1u1#(Dx_yiIJ z0{25|@p{g3L0$G8a$X0vAHvM>1Mw@{Fcx`RtX{^oK#spt)J|I4bROG0Cb^399L7+1o(i-EpA3RbY zX%g#erNvRKFWHD~m4HCAEI0E^K>U`Uig&ZbYN~>aD7vgfOr7*)Ae_b51^DwqctjG7Ll04mEEN!HhK*Nc5GlwSr8 zzmQzo5}`_3`!av3({CF(tSWLQDsIwgCG6Q}G5Zg5;1WJh7n)71tsB0SPGA+mfp)&F z!_xmI-X1nqD!#<56nlz_6$uI7cvA0rJh&Sx>UJ_*WG(y{#=JRJTpY!Ip2BM$jbgoi zJne8eqyl5l>;6N^V=`W1WDn52`E?mK6@E(guyB6WQmE68hMWRQm%`iHb^F6q_j6I^ zV(hJR4`y{uF_ja%+1|DGy@s93tEC-ZlLeumAIjW*r}7Pg&0LEskot8;j|q2e{MNnn zqR-Wm4cpB6m8yST2^PI6hIIM808+iNpo?|^h8jg$-lCeXfhjZfwSb*}!R)_lw-Pt$ z7`1DR#^>|G5PB)%#9LPyYRf2r5z{$TB#00}r@Dv88l5txZ9hSV)dwmJd)+N!Zk4y7h{o z#YR(v){ARj_vqemz2BgQdKbL<66Fe=WUKj)#2Nf4kgkJeErr(VbX(U^67ha7l z=Li$c5Rs6c`BZKqy&(a$Ugglx4-jQmvG#CER%-Ws@0;DZy1n(EEo0>AK~HN$@%#OQ zKPJ(K%GY~c9mrvq_~4#IT{!)5{ipZ|rS!f}WJSdx@!YSRnQjpf!1?LWjibhMV`Kc2 z6fVbm8H1$69n)`Mqx)^z^`v)Z)D|w25mdp6M77`P#g9*4vwN2zW3`MQus{b^YZWg( zpc~mlNAV0enjZh-3rK{i{Mje}b0cSH9VGLPDF+*yMWX&hS_ig08qV!$A@}6_zqyih ze*Z14qxHpdwoV&pV{(t8ziE}yZSjj$0-erG5LwsaM2V4aymMzXqYx^&2^RC6jezgt zOqstix%?*KxL=OK&X2yg*t4$V$ZMhNdn=-c z9rQUsGc%Yahm$1YVsE^&op~ouY2gf|OrRsOn*CUAwyiHBV*})PcJ$=@%5E6sckpj(bL>v(sRBHEg{h5(Q9M-^U^9xl?;LBXMVLaYzVxVqRXX4Lp)+jF$~I|kI|G^pJ7CAAwp z>nZHc7VH5D-|>;7fu*?aFl5^ElG6Sco}9w__g~q!CcfuAF=!C6aQn|XI_ZW0B4FV7 zH~(D6_dYS!0*!Cc>dJyLa2fc6&eX1+O-GJq=T(;I&-C!K(oS98?Ig; zu&9&=fCd)!esvBn|8LwwNnm)MS8 zU>;q7Uf~!bE{-v6_{8*4!z#8cA4uPl2<6%I6g<-_p3R<{hfih3Qg~h22{mBvO($|e zc!SVgekpEjY)rD1ayDC_l%DLxi1$)^o=J}7=opWD9Wg%hW8|J6b15@FdiZFqOLsQT zWoT6brN%^DNuTM5#mF$t6O*o3RSrs@;*xH4LO*2OHM4`o*2iWC75MHepkO&_PyeR4 zc2|hWi-hE8n?}R`Q#9xW|HpF!D^6 zcc^x8Sl%iG4lBX2`Fuo=h*?X`RLalA}1TK zj@e~x5~50FCi#omRnjX1v2IUvh&Y~Fj1Dh}?>8Yj7V60V8lwylay%GP5zikZ*b-jX1vGjmg zfsD`c%M!(gtfDqXfyjIx;?aHX+&=)gj6pt6aUEm&*WaEWt##`;&oF{Qbrv`^yY}Gt 
ztUIFlSKItO<)P8aQ6DP+#jkd8vctNT;@nVoJ4E4Pa`wcuUoeZkjNPyy$7+_2TDLa+ z)#G}}8fS5xhHIz9)u^6@hw)~LDBuYN2ZLi}U+8p7r7Ojy=dwF@)3Zu<*g6j9suH=J zFN&JLMR1e2fk-SySJZmaiul8%=r{yagFs5DxiyRV*|?Y9mL0dN$HEy98wHUvb&`@~ zS2tCFxl)Hq{a#x#xfKdFm(mTAJ$=sruNo@5NxH#>La(OXFv^rZn0|C^Je_rWUA|!M zd$Kdt4QLStP}3aqc=*TD`$FgYyRh(y<;l}iM5Jw?0{c@(b{E4A9u4kB!N@tO4yyXG z%j9=L-q*>Lt3FIlRUONmRaW!JdzCr|0#Eg(Ib9h8-4p$1^YfAdK;A+pNF+kBHy}s- zP4*i2Frp&?_eB`IFs@ze^ey+$L_OQ{hyCVjht|FT9foh4noiw3qW&ZmXir7B^hM$c z>-L)o&WF`i@;h9QM>JoTto(4JKlaXB1%NyXo+41Wu4fcle?@-uwd13^To_qg2{R|D z`^|ULzENUC=UJD$6gY90yYE9}to`%lUP=TgA%BTPndJ8QWX|-t$NZ~kFFkq~L@v&vISA?QoU4v1U{E6R!6BUN9w=juLX=h#FI$ErmyhBLK# z#5;PWOk`xdn#oTgb5u-VkL!wK?KaO#WddZVCOyCYTS^fUe`FJLplwKj4$2Esbps0Z zTN;^9h+(!Dr|suh4)r(T6Sd*5zD7RQNm=-$;G*}eY2@1*Tzq8TPRJGgq1Xedf$odv|%BM7mNgNX?_VpQDQ$%(Mf)e@8x!p&N?+UE4p2nm;4 zJuwu2ec-^n|9-OEoGdZ$u-}nWUylfB!Tk~S+yN)ggzy|@K~K49P0Gd9HM67PT_Uxt z6cx-j2+vk&uWrwla9obU__LqAr{PcSf2KTJ+%W8%nPWY2I9UD;d1IhDiDImx$Td$N z$TqbrER)KuXWq2W!tQBJAINUt$x&qJaT>VoJWZ`JjF5f3T)OMFe)hCWr&7UXqSE|z z^>+(abFC~N*>MI5R9Vp(x(y)>HED8_)v8B`vz%>3=PRY3bwj5hmPC-0=h9d?Q@S9$ zf3GQ*T{@DQrwN~+?q1x<6Oi(V`e083owFSaAQnK@%U!>!N=xL4a_}#C9PDWJcTT1C z>I{&{4>O$Mt8gh5yd3NsA_4%&Ch1hHs)9zL%9x$rikf=%yPUEzMtZ$^oWky@Gf7{R z4K3av=IL?NG+)y+7uohc!nQ;{I`RwjCc&ZD3>-_9`_r08*Qn@4Z5b^&uaa<5qfPo;ySCunj$G%7B)! 
zKo(b`sGCzi2((S4nUbv`M80X2GY^$Mi7;yYaCHRkBEYbG`jV%0~`VGYG%>>FlEq zhfV0sg+igd$;(J~B|uYEXmy0Kmg;WZaQ==>uIfp764*ON=xo-3iQRqnb%0Gv02P9= z(iJy!C_Se6{{8!TKO;`VCIl<-b2ftPoE&83Cn&&c1nC6ECV8l$BA2S*n2_+iQ|ix@ z`B><24zbN-D~rFw?06A$JI;aaA2`>LVgyzbA%8BjLRSPAZ8u!I9!-wyoGpdN#3+lX z`aZ<1$~Nswvj;xV)P+fbEEaDMg}`L!*k-oQ#eTt`SYa(IBA!QCT(8D)i>bY)24K0s zUYT|wcr7~omQ~)wNwxvyT}BFpGf7v|nFzB+HjrAvHO6|Y-K)0`^7idT%zD;jHMMIx zFXlwunVZ@m5u&-Xy5TvTLF}5`XJzv;S)x3t2vkN)dd~NmbdvUi7Q!IYv%~r7j@q6l zsc75jThR!Q{&~)W%;z%6{JKB{MeA|#XFxDB@p}lD1<6d$g6yzZjUV1&T97Z0FnDMn zwU!v^sO`5;=O_{r@d6a=>x7z}OrqC9(s%+)cpYgG8VuQ}Ic3 z+VbD6fqoBTCxA=?fSr0=U!q-Qo1u*Q&*u?xgy<3(CN64ARL$E-WQS1-Yt-|GK5Ah} z7jY>IhrHFV+}FyB#%Nc0%U%TJ2)gYaVHARD^~$gj5}m*xI0!awygnJSd&8L11N(ui z6to*m!2FKjQ|!Gyd@8;+bULLo(TTFshCX5y@@E10gl2M7;l4r)pJmTTLg-@yA!`EW z1VQIu)iwOJtE^cFAl#(BDE^P8JCc8W#BZA+imx2JgK^K(tlrIBup2RCBIApt(@-EW zW$0wzvT?|I+Wkssl{%H9Lsomo6w(&6JoacevtS?R?k?*$=YxtI`D23aJ;SsyYDaXwWe;Us5`7RN zOe9qvTmK6XsnHn^Lf>L$`aX~F0xEgqPA`D{WZJihmXQ|FNJX5K9^qPuiDmNA*(+eg zqT0P`^z;ae?|OyWP*i6|_h}s*1PndtDDj^O&r_spc<)k|5Y*OZYmm~^P+kG67Px@5i8jSBP_Z$&+;zhRLA8o=5hXw#`0R%qyZ?w#I z>EtL=T(WEcVQoyt3GT%-k@-t%p;dsfSifMO^;J8pZO$>isNzbegxt$|RA`SozYL_B zBZd+UyW_;O-Jr+j{jvgq)!lTb9uk1r9<+J#E}&%~hxHK*JY?-hpXFmQ_o3H!9{kCI z>iE2&$E{t&XTjrp%9qbdig@7|@9(yhP?Tvh^w~XnFTf7GzaoyGZ&DooEQy$}TV3WU z2ZeEsRoOwV|ICG*6#^}kvdDpSs_?*4-xToq(4Y1Na(Cd^DlnLjehycHo(zd-sS5*B z8Yvhp&NSVlBK-21amn0LxUjM^0IHUBy&a^h4ZXPfH7#|0UCI`y9b0XIDI_rCUS6;u(Sxn{&U`-3V%%Ehc$t@ zfFl|FVWC;Q60friLI!7PJgR^kjE$pu8R%!WA}u~h@W6$3;db9Ge*Qg)S?d!EGUT!> zROHXT@>CMfY`^PC%uqc#o2v0!dC)8oBu4VUxOq;gM%1_?^4W1e%uceO;OE#8I@5Ht z?~(6*185-129afAGeYS4u$yBAc*L+L|I+vn{G)O~c=Q*%+HY9_yqj7^TlP=pRA@9@ zOdqCw#QqDVg-W$s1VF1FntTIo_N!-bfNTO_-bH=MX7o~!vaJ0}(@<1P&|QPVh(1Q! 
zEtmjz#$QrS=AGlT&BuM4=76>~5=>Dvd91nPfuk>;7oeBqw?@BxAE6Dn1}W|GjPqO^0PAvq z3I5i-|BkFRLG)x?YRw9>#RWu?D1Drp+Pk2s%Ddg4iCyZA%EHFRW}Q$>?R7PPHB7Oe z8Rjxj)Nt`08v`F(Zwpjn$ce0hPfqsC>!v*3j8Szrw44V^`4x|`h2x2UiZ3coJY0^P zFy(Sb?y-^ zf`7>cmqHALrswPpXK#6w3H*elB2(?V9zFKS^5;CRf*rgCQi@9DAP>O*m}e``>2Ys+ z^HgB#e{k3>s@Z&;+DrRnQtJi)%FQ#r5(ksNXIS-Ydw+(0Yd9bDtQ-cx6+MXWX{OJ9 zX?Xy}&&ZNtfcY*=su94WL!f=^fTv`0Ehys0K?1UgKohMvCdD~K6tNovXm^c;)q0x+ zV?xTC6g>H8m#HeIEAC!wszG7{ch$Vk5{EmXDfqnp%-=&5RyybFSrNS1=b*0s7$C25 zETH7^zzVva%x$(CS2~$1dFaxm;k3TuozZ=-fNuDTWUX`27M-D z8jo(lsw0=R-hK(JZ7lE-pG($lK*CWavS88y`9rMW7!x6;n9iwSyOofWdjj5?N? zW$e(8*EsEvX-A{4A#nWdqnWEmxYNyZVxX!s;MI-qQ=#0NGei_Oh7hrKX`7}pg3h!b z7z^)5zOJxLyN`9kd%Kv|Km$Sy;3{9PVFS_Fo-u&XI*zHl7{t9}pj`AtFQ8>DtcDsC z#IXR6-S;CE^Ue|dY?0PM;^1}XKEyeGUO%XJ2yTwV5j&GWmH;Yc-om(?d#@f6k&DEIISZ^ zjvzK0K9C{SV*q|7!SmF%H?ckP;IrXc^PS7Hh`jRADiuymoN`QsWhP zWse?6VAi^_ER03=l2w2<>-B*wRH04;wy5-kkV+p)@N%{8sCURN&4^2GrV)_0lQ|>3 zzJE`&>_SOPFrV~27-X15=!;FAc?u$yd(9M=ei(ZM%rXtOh#odRku4w7@lc20fr35A zwzk}D0JF#$p+o!gJkv8maFqn!lJtFqXUzym$Awn=(>4D&Il~DfY4WyZ&;ZEWgp9&6 zum{)h%jT*6@sr`1<Y5M)}!)mHJ3H%IP% zKs%{XtpBZ2y!)AhT7d>zHK=Uoc-`KR36_Co;0YvO zaT`GSMgb~GMJ8QU!1g)?Xobm?n8<3VfYY6NJiqn zIFDnRTDsxBgC;M>lv)AYUioKtMu%Rd?bCKr>1%k>5`vO7pV3e)`&hH+2~Z1q^4X9d z_t_w#V7;`-`xgalw{bJW*qwow4R!B+AZrRv-my&`nh~ubAx@o^L{n5z^(xtm(nQc? zDss8~{h|_m-zU(a*%6+p?-Y?0LEws?_lZ z33|3P@iuE*E!2Tck#8?f^}N`K%9)_8!kW>4!Atl+SNwM}|5$J@t@})&PNgzn1q<$n zUsO8Wr4>K<5l+23EicPM{KX?Epds z2Dl@IcNCM2`#36=K(_Y2+b8c_Kqd#A>Wj_#KXaJm z9QY3emvONbeAZDK_do-#xnXpsKs+TLM|gzjiI(7YNlV!XPowHBU;@K`d*5*}stDUJ zGk=qRGU<*VnP>1M?d%|zUj60fvJ$7+1wa5BI4Q;K9F|h44zjTwppf;oo%u~U9vk%f z;+Tm91Kn$eCJ~OxC{5Rk=|!wkB0!_+_Cb#fe$E+!E$zL zj6I>|w@&FzZ9l(Dzc>$jo!BNZCJj%|lW=j5?eS)>f#jw0@Xeth(AHz0>yKw*j{{M- zywN~F;^%@~96ksk)i97aAOGzF^2BLGYR0?-q(>L1xzH=#r9Po`C3?XXW*PJK=I2DX zo^HSH!CHnR7AEmBreWVny4&W4@t%eA0ts*7ihtOgoYoT(8B9knc>Dwe8=m$dHNYX! 
z0QbuDsjHTwws0~IREsfMc+OFOQFu}sEx)PP#BL% z-u0&BH?o0_T|~MkS|v1!(hTnf0*>?mljn5ag0Lb?AaLXXuScj|xeDW>ErM9Mc_!gS zko}-)KR-Q_L}m3TViso(51#@e-$Lys?*qwY<8-EiO#f_GnSud{7Bn=&F-GY@5#wgq zWluQTsa{)${q3rl#WH2hBWDns)}G9Otk<}>{v=w+akE2Jj*>_lGUYAj35tY5ue*Ub zm|hV(Yu;tv8}-SVE>;x|h0AhOluak(bTboDi)$r~eg#J#H23jnH=_L)1ROVCr7CX5 zsw)rRE~5#J_U*jD+=_&Kr~{75z8RUggR7l_9`H9sH#fl6U4%ucLK*)DU1=_ zpndvJEp(&ob*cmX9;oVp=|;i}K*|CONLG#5cm6b$jZy`Dg;jQyLdTSj&4r zO)UD|?@#i+DA<2onG`nm7h32o$bz4JW=S)EjakE)o*3tsKcESS+n;c9xZIIW_w;`di2xHt@%ErHQ>^^ zY`or;e8HN9F+MajvM-d&D#k=F#$OTFZj+{&smPulJ+4+*c?%a@?8Q|O^qghBKei38wg zjijUdYh9vez7HscEMIGK)b0DMrlvcr^c{mRe*6Y&j69gTpZ&_&3kyW7;AO)m&`y}? zD5Wpi@8V7XbzxJLx$}TH{|!G;IUUffUaAPu_rP zD#=8qu>-&nC@((o2D>XhxOI2@DO|?D2iFB2Q^DrFJEE_S;12r78K_Ci#mBneoHpK? zi;_;jmDj$YRZ6B|rT4_P_y$C%k)m5r9i&rKKw&I9wjW{!neroeN6?Q2+Zb_Bn;$XL zt#@_I#oGv-2aRQ0;BXBO99Ox7Q;UnSMwn>@c)NiXn}UA&IZ&czB;k7NM>g`mYCG$% zs=6=SD@sX&v>duSq(d5{BovS?>F!35k`C#T5)|p~l5Xkl?m9Ggo%emef508%UVmc@ z$AP`i-fPYI%+K?zyCGVhN@Ckab~&c8WuP~vR}EdFn{>(m6r#K8ZnRLV#zxawdE{bc z_^JN5gYLc0{Zh?>OKNqLaUjy3Ey`$@kGU&&&urg;BCF^=ACN$xTSONIZWpf!6Clql zPXi~{Hxsk4+6|^b*vx zW*IcI)xNP<)^q($vvb7&Ix;hs?ak_RiE`>@S@JrIaPcg9_uYR8XEVO}m9vwnW?Tk^ z?~pdjBa=3xGx0vt(33=1!=o*ZLtC^Cc)pRt7sUpW4*g#QKYQ({h$S{?+0;qiTzD#= z3vTOcmOA8Y3OM%in@=`=QyjSm(8F7>f5N)@iBjkg!GxEW?i}aP_7qF@;c5iaHAej} zAArOLe!xg0uGcTmG~-2t)j<%w+BswUKQ)O!qL$v`cI5d}~Fy?L;=GCU7QdwX_*nt|xfk3e??HIzf>o>(fe$m3mwJkYQj z{0;#G2r8HFLiQLu*uh9G<8!bVSF`-!`!6mi@~k7U1yjmZ{H?WLt8Fwsz*V+ZMeefoP&B2TwvcC%xB(~OybxIi= z1A@eoip%%VuVruI*AMG#DrD|m9HX>WK1hPHD_APVJXq)tzCP@SfB#If@{taS0PzMk zByJD_hGYu43i0!g`5%@Sp171^Gvok{ed{&bMG7|kR32}2ZTmcg zXIFOPV(#wKE#>-uq%^BpIG2>V1`?%3QfVyz!qt^aV+_LDZ-|zzEJ*GL?IEqe zydrY2=06;1Bc%>BuottaiGM;cSYrzqbzg2g|D50#Jb3v$V5Uv_;q?myIN6sxQMg+v zi7(IT*c^gQX`hlJ?fY$wvsF{Olq^=q%P0}+1cty#IMLVB!1ji6164BHM=tfZ0y#oV z5QYS~7`}e)zG2nG2HQNYSgMv=gzhDS4s489L7Wu1KKC9s`q!WCuxpfRbd5<0)SAQP zlPW&uGix)_4<%i$sI=VdGTx(-c3i$NS^o@+?x+Ji?!{msK3=opPO}WYD$T+toy=J3 
z#sk0&Wo47qM_xAr<$wF(4!Vlu9Do?1B>2Y(CtGmzSYU$>*ofcCd#>{G+0>JeyxX5 zG^^{lz`HUnH!RI}i`B8?P5i9Z9Lbj0yi2{>rfE2{5No!UtHxM9U5o=3Rw&~B$6Y$s zm+RnvP0^VQ^>fm+P&uZ#)8lVZ#cY{|zO;VXM@)qjcymGA(}+uKa`N*xvCzh_-Q$36 zy)H;ak(sN$E!aNe@w7#aqeU*k?6(*%`0l7=?c&luzfpXevm4{wYkRUgK>XIZM)=Ac z-sL5xIoQMIV65*aMgd<(WzNZCAQp|Y{e3_fsH5C?K35Uh!al}a1QM5K4`O+$R=n+B zrLlH#V?#*3rjD$*(W7L(B3@&J@ha&8K{oQEBZ@m?dks=}HK(2aM(f(|6lb3YNy+ja zTB8|#;9UY{DNkcrD~(|Mw*%(5XsqSiKNoKrvDWss z+(`S_pBArC-jGRL@%yx9u(#NK^ypr*Ty*bL=$y zc=Px+wJQih!$nHo4(RN$*q@DfflJ8w25pGMWZgCsU{NR*PR(vL=O&&$mq z9z>iJyQkO0?1XK9+Uf6R=WGn%M{%5ZufB6~k_P|9@T!&%J*$}y|3T833A6PC)l*j^ z0~A`PK*g0xxRchMHRhP2dRKyK4ilNHgf%k1Bb{`is~pYe+=X$12Qm5`be~7ClAP8I zLpvZIu>=?DbZYo0xVMx3Qtlo3Zt3@JieWX9BBiF0Prbng49$Cng0(+J8N;n*b0CZlz0i^S|myPv^-FpKK-6Z(X_1K;*J+_u@@sSUKmwgj!*kJQ7dW!s~h?F^0K1 z$h-WZ_46}IIf=~gOlw}9Z~_r~5j#!Otlgm+CKhEH!(+z%@NPq__1hHI720c+&t~!5 zj|U=6@E!E2Uzm1U#y0@ugVqinbzYK?Nw=IzqnU=8cjY8A;L_Cx|CJjw#Hw)Npd+Bh zH)(K^2)UQ=+vf&l(~S)g*~9!WY@~W8K6X2vO?~m%{N;jRA5`?(y<01Hh}UpW)|xkE z0QnmJuD#GnXIy=H!{`>lhPh|w)Dq|^Q<$~AogC2k?4;f;L(GV(=NAUnCUN^^52f<< z!0rCQUnN%L1yUiDpl?Por&1|nsL$Xw?C>={OGo80y-I9&Y96mVOQ;Ozy*S#t(DTgH z4Sr}OL`%B&jZi(Y<9Dej)DHHBRE=c_)lA<;oCrb~BpG>hC5@c;I)PK-+5sb=r$$~1 z^8zNBElN*?0n`7qLg#&BJ8hM+T8+EF*H!& z_%yjHZz-~w&p8K~y{oc#pRTeDK(VnU&=ZIW>5&Mygs;bk;-I9ELJcxVfbq5}A;9B! 
z3e}xo8E1wfPNGj~QTqo~Wa8TClJ}~6GOr0F)HDw{B^gV=A3{lltGG(wyl?4=(^@`hzr~y_1UY9rkuR+bsDz zsRbA>{v?x?QG0FD&r8~@&A44s={JfMU3|BLezw^U}>k$6b zD`$myyOyR81;yPia+Rvk+J3XglpU4wBt1cUsr7hgQMQAPkeKG6f#MNRPL zqTefw(>BM|?u-zyL)ac_%`hG2*FiSc@9#mHUAAeon*^&;yJsQaXfX(EimtJ&-Vm;a za-*W=$CC{(cPGo-fZsj-M%2k6Zk=4dRvUu;aOM5*Ab`)&^(2V5TNTr~ut1X7ZeBYz z{&jsC&1`uzmRpjYI7Y`S^>Nb{pL+_q@{WM?r8At5F1Qa14jn~Kft(yd&cB@!rY|_Rr}KD1drHU z3}dG1&$b|xG&hU!15LXivtrWA%rz8=r}LR}Yk#vDY;HD)Vh}`*AHg$;H42$EJ0B-9 zV3V3b2oRPp-;ky=(%*!iC1QOe7$P@>KU~1Z7g`NkD;ZyLH7VOIG<986i8z@aUsQ{X zu(IX4_R9fFVwA{lTTfKE+)rm|aGGem1>O|t0I_f85)FyYr#lk4W4>j*r1}#ouZ>l= zMC(u}Z4svTG$N2C=65&PSoO`8Mz=-+4tmShMnX0LJT$qKWXXFYiFFY`rYKb@S`M{X zmVDaXR)sSNs``=^@>wD6Os2IuTpiPuv#Shc>0s}u5okr7PY;+^__TnTn395blSQ$ggBfCf;nK&b%tfkM+0yMG)TxRetybN7|Icx&DbEZNSUi=Sxyd9vtLnY=WEF-Reru>#;B zX`Ixd*b-KD>xxyUXdm?SGW?7sDeNX^xYA@1Pp|BTZl^Qy&_Sj306wU30g~~1KNUa4 zzyaI35vi3o+;o4V7s7l@NV;?rE)A5-YfR+Bmi0eHO~R zF{JAH&9!7kFLI$Fl1Zmg=C-o|vEh~He%UH2<)Eqbm-<{0mNk@ZJ^5|dsV<6^2gH6E z#b`4TYuApco>X~^R$js`K3FK@?b8Z$dVbO&{C-%9-+}8sWLvWz1-zckm;XFH_{<-3 zy|c`1m-|W4x({vqX1g;T_yWGa>sOt&+pk3I@~K6XGA_hY@bIX=9CYncFqRz}VQH=T zAz=`XGk}AH{pRhcV~0B|Fj1(>xQp~DRMSo>%7^AFr=n9M#uV3@Wg0u? 
zwTWfz2ZFWM=`R{~BqyHPYr!wDyOem#7-4yf;G^wGe}U}N+F)OB`*|^1z01ky(4D)J zbWEeo^+~$$_*U?)srd*{V*fUk17+-n(30t`U>bS;=OKc`D(02NWxm7OHK)6NknaY@ zyqw_`r$$Y5N=#{@saxECcqtFb%etC4AS0plRe5Qx5lr_{AW}}J+c}W_CpS!Ym3rty zU#s-nDh;+Im@LxF$qYnW3m~6y306ppjV+(JPpEfj7d;Z6*FuRkKAZ^u(Gg_Wq;cF> z0JNHn*;V!!`XIvyz0T%WV0!QhKol|5e4HSxs)}!|D#q+og=L?jM=q5iCVOxxtx!!7 zk2bK)%eL_(pGmj!&Fqni<`7@O_q0kSokEpQ72%MxV2}|Z4jWbBye}u=5w^W^YxFZ} zx1mshW!Y>gcmNU2WGw~P;T-aMeLjG=5J|pQ#xdeTfMj_9Uerg>ud$V`x68%ncw-ac z6zI8ol3pOWXYY;TBk@I={Cz3Q+|@x3%Ck+F$VEo`K$9q+F-N3FBV0FXsHP1<+HOWx z$#zX^J`zgtKc`w|2I zBOz)#3EykX2zGm!SWijU0g~XT0Rdfu&?oa@-Sh5D0PkV8z4k4_DbxO~v5mf}8*c{C z`n17ThnSu-j&r0bn(9AaeAHN72uqeStu)KY;LB{J6EnWvu7&81QH+$KwOjRb_l7}% zG^bNCQ)<@niOLP}(nzt5<;9Do$T(})%BNO2+qVbmF>l!U6lmi}wJ1kDUt7l{GLD-F zp3dT#OIKECWDhF+Sfgy*LV#L-skfc0i%E3AqE6Xb1I2gZdH)DDs1}7hc6VyLA#e@n}n1cNko&<4-1jVV9%6ha%;E*Np#zTpJj9f)rMT$F%N!$ zxRTS(wWz(p5H={tWE#Pl1o>=D)8s#byCeNdw1}uNk{<4FEze08b5&{#3K(OTx&X+@ z!i&8XWu_z1L#fTR1s_n5wU>(ctotx3FYEaFT96(GLT8h2E4i6&30Rbe57D>qEqbPZ&{wBGINp4v0* zIuOjZ%_fVI7w2-&Xmo#`w1P40@m}=!w1R?ztGW<&(_I)7hv}u(#$0 z$;GKqNXS>|=5xVS6Xi;?4z{3Dv$+K8*n4c`IHO413p{pdD!J^Jrz#>d_@CUrXU&p8 zl&pTF9Wq~2*Lc1j!3%9nR;>eHU76AL%%A@7ZG6rU?)@Tw4DggwnI2ag{vz|k0V{eo zGjKvEdB&f_QY0rnmArk)@}_`{;i>adjjbN;r`kB>Cg6F_J+D1|3!8>lg}tzi+qv9E zyDy!hB}zD>X#jzy4Cn9W)b?vSf_m?v3$*IG_JKHou04PlE%J}#;Q-9hBwLp@0sP6E z1<3IhVn-}WvR$RT8I0ulxxM-u@#4?3UQgPNUe>MOoGVI&dP9gdHbabEnC=x(X)FoG zC)cBB0p8LgY-a(i#MhKVNQHM`wY6-ZYOKW1g4cv#I7>E2OE`8McZ=~MTMh}M7`3eT zUUWu;o^>rTtg z@IW8TMq%HtwdZ-yiPbOe(C<;AQP8OBZby2eeZhJ+*ieM%5TYbo+byQ60WcRAg&)61 z`w_}I!cg*6{QNCk-5j>F6IFfS%SzG?C*Eic>^vY`n}?56?3 zb($ixRw^QXM;9+xAJ2oJ!&7VK-CX;*HxZ?lEaLV6mxZYQ!Tv9x3x?0p;PDAA`fDWw z6{v=&;m|}oI^vav$hRg?>f66K4imqr_hK`B5hUS*lO0FQ!HPx4$%B0~$o%3?Avsvc zHUkzZe(=w(yS_x4zjtyQ`+`aQ>Wy9#)vP{M4^i>C)@Ereh^I zk;jW*Pn^egvvK7QQV{ACnGhixlV1kf9Gvb5+hM5Dy2rWxx!+L`A;!`|G<&&ZAZe6X zV!Fif)>F|T_hoBVsPyL08Z(nF?&7hyXj(7`kb#+)>&mafU*j8ycDS$R;VzP82!)Gp zYQKYmsZPm5dTP_V>CV$#{Vn}vb|_J21HRqe`SWZAW$rHsXDaqb8}<3n5rCebP2`sb 
zn~}Or2?CAGa`MW@iXC%)R~N-O--){BjNpGa;l z+FPVcRFl3$#X0I>_9nJ%H8A(BLrEWW)w(g8Y#(c|w&z)gkV3rr2`T)7I{d^bMk&3i zijNy5g?B$Rs)6kM2!9XQ{G_^vDhA2p3zlg*j4mptIvUY+S#VZIn>hFq!D2K-a%x(f zn^p^=LYYqLYqNk4g2*6VrY=;hQ^1r-qWNChXSbLv zN4l$Bdcg_7-dL6sFD9?76ps zwL`p_5D{FYYY8B`P+j0!>Q-s2S812&r-Y_cIznL+S^I;WgZ96hOB^S%JZy^$%ZpUy z5l4wB_^fbqR}#&&8}4&rBS<$RLB2c=wAy;Y8{;HwH&C}dTs)u0v|E3f6$h&@khZf` z&5J7Irci{CRGyD894Sj#al#v;f-^I zc(;|fs_p88oBd!YNyQ{{jDT4dM~>+>Q7bLo;eN7JUTfch#$!gj^4q7;lfGL?-D~#c z6c2<=(9o^vm@fL5D6om5x804;L*DL{A7Zp8LQ||?v60)iW2>hv_9W7tvtP><#?TVj zowQBe>Irt+HAEXJoYV)rTI%6zn8ew967VYOH4(CT*_1|UKpdWJXE>qr=&@)`e ztw%N-fC=6))LH?`cya@{h)6$%c`54ayE%`|)kDZ(w-)OVY4gv0K3O2|LSv)ZTAX*# z4-rG8P<(4)wxFokefmCbS*vHWNx}75ax6iw7mV2(9Rn;+!MPM8Wl5FPK>}287ph>= zQ#*>>gh^N6B0YoMgn|2V6l&q?n=?eub6@~iIMp^K;z)dEE=^2pRL-nhX0qAX9iTps ztBWRv(d!ZLcV2Wl557ww~ktT@(N_U&jOv5<)&yI_dqn+lOU*WG|-6xT1K+whArNnUy?D5;Cf?j6&?ZmVwqgw89 z-Kj27(CNS}FNR{w1vD=wK4IjK%e0*l@Lfz@##g^ZsBS)pl<3o8J!wbTm|qsu=f$0H z?@?O7<#y`*({sL6SK1TuJi4csx6AL+_V3vXDdX&g-?4#D%qxlg2|E2<0!2eL-r^lK z$*oVHpq4`XQI%qkh;~kNsp~^W;h8*Ctm)9LW0xt;CCK`mv+V|~c)(F2Uv%L{4;?HW zqnBK^+B}tx!i{MXhFQV)n#GKFc^|PzeojR07k3ntT4dHC*HirHca@Nv*i*%J4u%2!*RF;*;n^>$Qqv9Ppg}}o$3hkxMlt_4~!a^9?}J)4hR6EywKqn;t>eHU`)Un7t$6UVmuF@-B{Y_YjCRS z8aN0PnqnhSOt`>M(z#GkVvH{OKp`pjr#Zr-J*B;367>Mk)$5l2<9`?(ow%2}79Ms4H7fynI$pK&m{B1fF#`H{jDX*&Pt8{T{~>xpZHIQa?yt(#Jgb465+ z1Un?VR6o`%l6g@BU(7xjmhjO0{ZmSy+p(u;6Z^2`!Unz|Fvi%7eMmM--FwQel0?Qu zHu?HlBUPK-=_+-JU4(sc&akU!+ZB@tt3KEB*R2f?K;7AWepuGpbLkimxKc}wWfX6T zZU07x!Yx~pn5;dSBGmEKLFgGMS_!*`pW);Estj-=9OH zc7^PIJ?l7s9_kV0Kv4p-Vh7SxHG1_V!rUS|rOtoaz8G>*&fvv|8I|Q}RVO zt9c)<^2BuezNvb`&5F4H5%h6wgE_J12->B1tY_mzpM1tq5zQ3#_6N)olO2y8+QFcM zD^3*Qi|ODy+I0Ia!Zv`D1pqfAe3K^>-D0Ykg!eP{`Q8+3xr+Fmu?$ zN&S$MQBasDJpj2JsGGx?B^Q$bOBFiYjvj)v^T(BNvcQ>sF+;K!6UQLrpsGEnp5s#E zY$@px@9|yM@J8BVEUu-v0&>JN+P(lV>U=(QHEwr0)EZGlvh9PkH+%zB4c+B%^mKkd z^H;h6{1GaFnJztKTMXdu%u*QH!6t_53cR+nNo;1 zts;eGC+%qUW@qvtCI7m})m|xZ>a@XFE>seJwYgQmeGtLM(;mg?Szn9NNqoQ_kYdwO 
z=GbhOSeB(wL0TOO4`uhEZS(?y0Zkm*btaJbcllqT@Q(h3n(QW6M!maJjNi6ryMF>5 z1a|SgatS*)r6cEgVgyuzEGP->mviA>E_^@;jxFWbQ}8R1Kd$WlDw*W4XEn_PUFO84 zVw0}NPM(Ke@6POtxGB%qS9=kcV5upA2TB1FObqe%xd58c(fdkSrE>DewonG^idU2e zuOTO54*@tfH>o(QH@Dzqf=Xr;sr()S5M*aOC=aGL*RKG#!Qb?3ielrHkVmB=id`T5 zk8PpoJ!9a!j$wZ2fUVln=OB_;=+ci?s~Oy2Q8!X+n@`JF8QWZ~9oBpA=C=kx16t{a#j z2wYjH!u27!qjT)0!vNo-w6q=s4Cs3#10*Ti{_+5Hei^iQvzU3n==|R74MPml24@n~ z^pQS`bHqgpY6ClS?ZgdLClAI$soM}5D(@~i#Jx}ewF)F(!*ZcQnPkz!v%qOjwpB_h zV7xV+^c8+C4dh$L*lKmKv$rTSVOkT|6BHS+C9n(Ul<;7~oq`$CFijEp%;}FGKlbF3 z>=p_3Pl`9#Dqj~b%`ZznyA}rRUkvpOjF}jZMk9R|%D*xVPNu-s(>GyPkdScu|g?VUSYQ)))aLDM@f4+1{;ZBS>rOOg?NRbU9{CLVJDYF+&+gI?=svZ zneh-@hQ8p&fT~JpIQdW=3WOAzVO9V~oy)qG6~p_VxsDi89YO)+-SsUhK_JZm5ioT= z#s_J=!%Zpm>)Q7B61H+a0CkD1#=#iI-zABOLw)sZ?vDWavTz4K&^x_>bMqJu?F8aI zL{Hi+!`Duw#q#_HKxK6k^;E@J#pH(yWpsZ;Y$So3A}6W!l^E)-D~3$OlkO`z(`{*= z1)_Q$SQM-HbDY8m76H`o5^$Ur>;XcSrl6F0qQkY*+*K`M-rt{Q=N?Fpmc;Y*%}XZ? z@>qP6sEHa8A~=E(_&^(la7`lt=t#sJ5ox`nvQ7PcZE!Jq2CcCp#jX5HQ?KoLSEu`I zVZw$r$WV-4O^{B*N2ZCT{StIvB4 z&Wq^gruJA^M_BldBr@ca6c=O9Ig}K3_mOL4q92*!AY}{HnPjhH9MUYHFG%3KAzmQB zo@ZpS);6uVI}}A25yZclqPU(>&OB?ZIKsV6qsXJ$^-9@x9C3@2b!0H|tnD6;GwmdF zy?eT)-G2*t4Sh#gMyH&0*4@UM4DY&5{qLXD`rHPQspggzT(!h^hdY&-cadQm9vd@n zp1eBjn6GjC`OCOp@&PQLJen{(rDoC{}QKyl*8Hoa~S}O#s7mJ%G_sB zDW6tbAyeI`z)p^nOU{&R!>f60)Gp3jI4Mh><1eS;qtn?Ldu&=FPxQO7d;G?N`*G-` z?y&&ll2FPwtn4ZiXFU4nn4QrB50jTx`@ zy!@0*X8f1OR(a*m_qeYkPT(R z+VOZN$9FdJf#JgoEjPPlpUXLb?%WvGv~+sBc9clvHtm@Su1Y98OK7l8+%BW(0G5PY zi$4wy;!`f=d`4k5nF$%>M-fR(;txsGoOr+c&e!Xl`)k)MnjF#Ue#&XhcqYzzfJ4zT z<eas6+OaC>pm!||R=51Kw1 zysUC;)ZuebEKi;UTO9}|esQd8OKR?_^Gcs{Y%{rzztuHg%;v){-70_WEs4b{+5^gO zj0x{!LVFsrq3^s^VYkV%G+;1asHugKTezpE*(l(4sx-caWphL(D}DXA-Wxu=;aHLtL8G7rG{F7Td?(vz#u-2cBDi4VJx=wwMWcMWPE ziWT~AGMf3sEhc_bIR8=~KiVc1#-dxaA4D`7X{Vnp98_;~)B25nz0d*OLHW?vH$j~J zJo`3KewCUw{L)fa>qF53om~1cQ=H1jCR;w|n04Ar740?`20ba4g}NZ^^DBG~tAUo- z&GPprTalD@?bdQ1>!0sv%^z|^X~oU4*!drJ9MSe$(2g&c?1U9YhY&dRPOKab?#YDR zOq{#^Yg3~XrU9=CIue$|xL!p<-)7|?1NR7V$oBv(o{=o##{oU^oJUPM)lHVf_aM|( 
zGHmYCMtuZ_@62pbrT#U)=F#ap`?U`g?9G;KU!7+rwHh6ojE${E@1N}*dyl1Ul+&+P zo#xgDw9QQ!)ZA$0U9ue1n2PT>yg|91LT=_@iTY}D_SQy#_Vq#_DG0W|O*PqA$UZ%^ z{KP;#Xt__-qL{*}7nM?&@w!EFs27-BP0q#k+s-Y`vC54{`}belbIG1p`D8zg6s9UP<{mT@9LBHf7j0M(?cvW zHW-O{j6&zH<}qnM_27?EjSYqV78v4_`N@yfsZn+K2Z(;;`L0 zyTJYaC&zLmPM*Qw+j<)=?b!*&Z;54p@%vlC)c1V&9^N0i*?N2De*GzryK8rSFj|>E zKkY2?LzdG?2Rozsq2n>(yU{1}>!(T!4+<8&4Xznf3cDX-&=kWLZzrQp=a$YomC#xV zTDqzjdhZimV!!*|F(wL~e3OBH4#1sz)nfUd=PlQ~ditGEeO+lgjL>oIKARuZ?C`AD?Fg$i$>y+T?ERG%Dc~YJV^KI=_9G zI;$p# zN_o}HADO~8eF<{kc5(ZTCp(a_zNBg$c23FDI!J*N2&_}a$L+P4hV@All2bV^t+z8P zj5|q_7a1pK;;S|JNvFm5kEXa(rwh#bw0+0NB|&&J&yu-HVj?5uGb#BP9hQAY1=ej! zg(2Vfs+>?L%T^Nw>EmhC;opsQ7v+x)4zb6v_k!ll&)o(ot}Wf?%W7;jS<*mO9NX+ZnYu;oLtA&k$vG9#?#t`c0B*@f`@=FJeU+NT< z?`g9!6(TG@#C|~gwYTJCO+|jZTeoKMvF4|0O0vm-zgIe@xZjXowNx`iNVV9*Cn@cs z^@n2P{egu3+sULB>aY)cCmxusi2IB0`gp$+2n+xB!gIr(Y6M-m@XThJCVfr+-q2D@ zrCIgjov{y*=2H?jd`74mLAG7irU>wcXqem0h8)s~_!G1qlT$f+CeD zmsIHU)^Cb$Wm9RY$}jsp?wVZ0>+d@6EXYTnIR(qxx6|cWS$ud)T@b9*tkqx7Kgr~& zE*aeK*jDb$VcpjETSASK_LQhDo?4NTZFpAU#{unYhWD+K1*znl<(+uG%TJ$OiY+mVY_Sz#8pp`HW*U2;K*yOXE-`sQa=F+WOSiP>d?NIW^kP z5AhB*_wnJaBm(VYCEE_GwK~m~leV$yh*6Fv3ruUDSg_PJW1KmvDBSn`vz(0~f&Y4I zV{raijyUm(V`VuLEu5s^=sTGyt9Q(KRKVXHNTD8_L|Qpk#QdHfrWsx{B@rY_Pe z@=S~On3R?r8nxZ_b?Yr;ZVNFt$hc;yl_@Eg%NQ%nD_&JvJ`>s;q1knwTdiKHFbUW_ zE$;UmS)lE&m~u&AR9ciqw==A8;G;dK%UOEoiqqqfNc;Ddd}a;B+-Ez23(NPhK`doI z|GO%If37M(dN5vL$H{8o`A!=_2}dD4Ht%=;CxmPcUtKWPZ5w59=S7%_K1>z+hQs%# zqz-tqAHvsqe0>O zYtjfJWTZTP#qj;9>|z{)ygY;SK9wN*Pj_3@_xn?X9QH$}@-bW0C0a(bb!Yp}xJr9{ zPZ@9;wdB;Tdr+%LPtKq2fpj|2hL2+t_8-+!n(gxB(`&S&MiE6<;zW4(VtCDGvzb0i z>S}T4$<0lo<(hO@916&3@I(g|r!5>_%dmL5glM0hPsN;n6Xaj!(X96fu#r!C?5g@@ zrml=Jm(Ppk=JI2~$)@&O_*&RYMM>k?PkbR{R5qzShX1WA_*NO*9U*XS=SSHS<--%U zd?x|h$EOt0yBqy)6VV%F<`OQ9M|yfs=i&p2-bpk87)pBrY+Nq((uEouzad-)qu-gfuQ=qMbf9C?QzJB^I`+PP^U4FL$&4Xk6H_p$yWyf4+ATW3?b| zHBukixL-`Cm2JRtRufc#Os1_rUyG?hkCyz!Z83tG0B^qIaIZfw(BqJ z_Nrn|=f))!OHE#oP%u_oX{*|}!WH=ASgns{Nl_Q>J`9af->qPaZA32+_sq!SFDC}; 
zYvC#Am8wkgwj~^>L&ZNz1u+>u)V|-V$dmjvWqEbK`<}7Z`>3-oc}_xY$~F$aCl+^4 zJS_dmd|{E(Y=!b>d5?qeuG*Yzxzi7RObEZrfc4*Y1x+w45(#TdYVd+H{Xqo9N(%d%c&g zzs=R4d`QVL>Fu)(llk3;o2Qy8<$;&QE;vA7TVNMujd>5(m4@8dLWbzWpBj=6wF6jaL5_d7+<$ObDc&pW}Yt z4IP=bKkC{I6dQk4V~gXyeJ8%Y(vKb|?rKY>@bkS|$=Kn=&%PPyeU|%&Bi@-36B1lm ztgYC{xu4p3{!;qSsonj&0b{1F@6ZeTZQA+Vk;F&;yvv>N4R+Y}W{++2O?SpQ8b;YZN>ay{ocigIJ zb!%_!3oGRcZJJU9zJ03(MJ}4@KI@W*PePi@JGq(`6%O-5hr(?i+f|7ok?KvfpS`2S zklw}r)i$fsN!%SsE0R-Rorz7vXMJNRvGhTLEZH06HkpKy8M>XrqqiIVX1Sd|6&sfi zKGv}7+)S(=VvzB+;}izC<*tvJRT4_mVOg`NPJTUPWV4j25em~jbJUnzk3)^m%IM!3}DT{|HmK7 cB-bCG3OPynug - - - - - - - - - - - - - - - TeraFlow OFC 2022 Demo - - -

- - - -
-
-
- {% with messages = get_flashed_messages(with_categories=true) %} - {% if messages %} - {% for category, message in messages %} - - - {% endfor %} - {% endif %} - {% endwith %} -
-
-
-
- {% block content %}{% endblock %} + +
-
- -
-
+ + +
-

© 2021-2023

+ {% with messages = get_flashed_messages(with_categories=true) %} + {% if messages %} + {% for category, message in messages %} + + + {% endfor %} + {% endif %} + {% endwith %}
-
-

This project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 101015857.

+
+ {% block content %}{% endblock %} +
+
+
+ +
+
+
+
+

© 2021-2023

+
-
- 5g ppp EU logo +
+
+

This project has received funding from the European Union's Horizon 2020 research and innovation programme under grant agreement No 101015857.

+
+
+ 5g ppp EU logo +
-
-
- - - - - - - - - - - \ No newline at end of file +
+ + + + + + + + + + + \ No newline at end of file diff --git a/src/webui/service/templates/link/detail.html b/src/webui/service/templates/link/detail.html index 8b49a65eb..7df9ddce6 100644 --- a/src/webui/service/templates/link/detail.html +++ b/src/webui/service/templates/link/detail.html @@ -36,7 +36,7 @@ Endpoints - Type + Device @@ -46,7 +46,13 @@ {{ end_point.endpoint_uuid.uuid }} - {{ end_point.endpoint_uuid.uuid }} + + {{ end_point.device_id.device_uuid.uuid }} + + + + + {% endfor %} diff --git a/src/webui/service/templates/main/about.html b/src/webui/service/templates/main/about.html index 4ba3a5845..80d61891c 100644 --- a/src/webui/service/templates/main/about.html +++ b/src/webui/service/templates/main/about.html @@ -16,10 +16,10 @@ {% extends 'base.html' %} {% block content %} -

TeraFlow OS

+

ETSI TeraFlowSDN Controller

-

For more information, visit the TeraFlow H2020 webpage.

+

For more information, visit the ETSI Open Source Group for TeraFlowSDN.

- Consortium + {% endblock %} \ No newline at end of file diff --git a/src/webui/service/templates/service/detail.html b/src/webui/service/templates/service/detail.html index 3a0f0f7d0..9d2c1e736 100644 --- a/src/webui/service/templates/service/detail.html +++ b/src/webui/service/templates/service/detail.html @@ -17,120 +17,170 @@ {% extends 'base.html' %} {% block content %} -

Service {{ service.service_id.service_uuid.uuid }}

- -
-
- -
- -
- - -
+

Service {{ service.service_id.service_uuid.uuid }}

+ +
+
+
- -
-
UUID:
-
- {{ service.service_id.service_uuid.uuid }} -
-
Type:
-
- {{ service.service_type }} -
+ -
- Endpoints: -
-
    - {% for endpoint in service.service_endpoint_ids %} -
  • {{ endpoint.endpoint_uuid.uuid }}: {{ endpoint.endpoint_type }}
  • - {% endfor %} -
-
-
-
- Configurations: -
-
    - {% for config in service.service_config.config_rules %} - {% if config.WhichOneof('config_rule') == 'custom' %} -
  • {{ config.custom.resource_key }}: -
      - {% for key, value in (config.custom.resource_value | from_json).items() %} -
    • {{ key }}: {{ value }}
    • - {% endfor %} -
    -
  • - {% endif %} - {% endfor %} -
-
+
+ +
+
- - Constraints: @@ -77,63 +92,75 @@ {% for constraint in slice.slice_constraints %} {% if constraint.WhichOneof('constraint')=='custom' %} - - - + + + - {% endif %} - {% if constraint.WhichOneof('constraint')=='endpoint_location' %} + {% elif constraint.WhichOneof('constraint')=='endpoint_location' %} - + - {% endif %} - {% if constraint.WhichOneof('constraint')=='endpoint_priority' %} + {% elif constraint.WhichOneof('constraint')=='endpoint_priority' %} - + + + + {% elif constraint.WhichOneof('constraint')=='sla_availability' %} + + + + {% else %} + + + + + {% endif %} - {% if constraint.WhichOneof('constraint')=='sla_availability' %} + {% endfor %} + +
- Custom - - {{ constraint.custom.constraint_type }} - -
    - {{ constraint.custom.constraint_value }} -
-
Custom{{ constraint.custom.constraint_type }}{{ constraint.custom.constraint_value }}
- Endpoint Location - Endpoint Location {{ constraint.endpoint_location.endpoint_id.device_id.device_uuid.uuid }} / {{ constraint.endpoint_location.endpoint_id.endpoint_uuid.uuid }} -
    - {{ constraint.endpoint_location.location }} -
+ {% if constraint.endpoint_location.location.WhichOneof('location')=='region' %} + Region: {{ constraint.endpoint_location.location.region }} + {% elif constraint.endpoint_location.location.WhichOneof('location')=='gps_position' %} + Position (lat/long): + {{ constraint.endpoint_location.location.gps_position.latitude }} / + {{ constraint.endpoint_location.location.gps_position.longitude }} + {% endif %}
- Endpoint Priority - Endpoint Priority {{ constraint.endpoint_priority.endpoint_id.device_id.device_uuid.uuid }} / {{ constraint.endpoint_priority.endpoint_id.endpoint_uuid.uuid }} {{ constraint.endpoint_priority.priority }}
SLA Availability- -
    - {{ constraint.endpoint_priority.priority }} -
+ {{ constraint.sla_availability.num_disjoint_paths }} disjoint paths; + {% if constraint.sla_availability.all_active %}all{% else %}single{% endif %}active
--{{ constraint }}
+Configurations: + + + + + + + + + {% for config in slice.slice_config.config_rules %} + {% if config.WhichOneof('config_rule') == 'custom' %} - @@ -141,75 +168,48 @@ {% endfor %}
KeyValue
- SLA Availability - - / + {{ config.custom.resource_key }}
    -
  • num_disjoint_paths: {{ constraint.sla_availability.num_disjoint_paths }}
  • -
  • all_active: {{ constraint.sla_availability.all_active }}
  • + {% for key, value in (config.custom.resource_value | from_json).items() %} +
  • {{ key }}: {{ value }}
  • + {% endfor %}
-Configurations: - - - - - - - - - {% for config in slice.slice_config.config_rules %} - {% if config.WhichOneof('config_rule') == 'custom' %} - - - - - {% endif %} - {% endfor %} - -
KeyValue
- {{ config.custom.resource_key }} - -
    - {% for key, value in (config.custom.resource_value | from_json).items() %} -
  • {{ key }}: {{ value }}
  • - {% endfor %} -
-
-
- - - - - - - - {% for services in services.services %} - - - - {% endfor %} - -
Service Id
- - {{ services.service_id.service_uuid.uuid }} - - - - - -
-
-
- - - - - - - - {% for services in services.services %} - - - - {% endfor %} - -
Sub-slice
- {{ services.sub_slice_ids|map(attribute='slice_uuid')|map(attribute='uuid')|join(', ') }} -
-
+
+ + + + + + + + {% for services in services.services %} + + + + {% endfor %} + +
Service Id
+ + {{ services.service_id.service_uuid.uuid }} + + + + + +
+
+
+ + + + + + + + {% for services in services.services %} + + + + {% endfor %} + +
Sub-slice
+ {{ services.sub_slice_ids|map(attribute='slice_uuid')|map(attribute='uuid')|join(', ') }} +
+
{% endblock %} \ No newline at end of file -- GitLab From 03564d05e9433bbddf2534978808b4ddafff0c61 Mon Sep 17 00:00:00 2001 From: PabloArmingolRobles Date: Fri, 16 Sep 2022 10:00:39 +0200 Subject: [PATCH 40/91] L2VPN changes --- .../templates/interface/subinterface/edit_config.xml | 5 +++++ .../openconfig/templates/network_instance/edit_config.xml | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml index 63a4f4f6c..6eb1fbc86 100644 --- a/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/interface/subinterface/edit_config.xml @@ -14,7 +14,11 @@ {{index}} {{description}} + {% if vlan_id is not defined %} + true + {% endif%} + {% if vlan_id is defined %} @@ -24,6 +28,7 @@ + {% endif%} {% if address_ip is defined %} diff --git a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml index 1944778c6..17b07df72 100644 --- a/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml +++ b/src/device/service/drivers/openconfig/templates/network_instance/edit_config.xml @@ -20,7 +20,7 @@ {% endif %} {% if type=='L2VSI' %} {% if description is defined %}{{description}}{% endif %} - false + true 1500 -- GitLab From 67789ca35609f8ec2d0c08e453c2d4c1e5b3de4d Mon Sep 17 00:00:00 2001 From: Lluis Gifre Date: Sat, 17 Sep 2022 09:53:00 +0000 Subject: [PATCH 41/91] Multiple improvements: ECOC'22 test: - added CTTC OLS and Automation component deployment to script Common: - corrected DeviceType name for transponder Compute: - added bearer definitions for ECOC'22 demo - removed hard errors and replaced by warning log messages - forced to use VPLS, i.e., always slices - added 
definition of L2-VPN service settings Device component: - forced to use emulated devices for openconfig devices (just for demo purposes, for the sake of reducing demo time) - added commit_per_delete setting in OpenConfigDriver - improved detection of PORT components for ADVA devices PathComp-Frontend component: - added default settings for TAPI connections - corrected reply composition; subservices were added wrongly - added log lines for debug purposes Service component: - corrected ConfigRules for L2NM Emulated Service Handler - adapted TAPI Service Handler to new framework - added log lines in TaskScheduler compose_from_service (debugging) - TaskScheduler compose_from_service does not retrieve all resources (debugging) - removed ConnectionExpander from Tasks ConfigureConnection and DeconfigureConnection WebUI component: - minor improvements --- src/common/DeviceTypes.py | 2 +- .../nbi_plugins/ietf_l2vpn/Constants.py | 11 +- .../nbi_plugins/ietf_l2vpn/L2VPN_Service.py | 5 +- .../nbi_plugins/ietf_l2vpn/L2VPN_Services.py | 6 +- .../ietf_l2vpn/L2VPN_SiteNetworkAccesses.py | 27 +- src/device/service/drivers/__init__.py | 41 +-- .../drivers/openconfig/OpenConfigDriver.py | 40 ++- .../drivers/openconfig/templates/EndPoints.py | 9 +- .../algorithms/KDisjointPathAlgorithm.py | 8 +- .../frontend/service/algorithms/_Algorithm.py | 19 +- .../algorithms/tools/ConstantsMappings.py | 2 +- .../l2nm_emulated/ConfigRules.py | 290 +++++------------- .../tapi_tapi/TapiServiceHandler.py | 104 +++---- .../service/task_scheduler/TaskScheduler.py | 12 +- .../tasks/Task_ConnectionConfigure.py | 7 +- .../tasks/Task_ConnectionDeconfigure.py | 7 +- src/tests/ecoc22/deploy_specs.sh | 4 +- src/webui/service/templates/base.html | 4 +- src/webui/service/templates/main/home.html | 2 +- 19 files changed, 260 insertions(+), 340 deletions(-) diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 4d67ff661..c5ea4c54f 100644 --- a/src/common/DeviceTypes.py +++ 
b/src/common/DeviceTypes.py @@ -21,7 +21,7 @@ class DeviceTypeEnum(Enum): DATACENTER = 'datacenter' MICROVAWE_RADIO_SYSTEM = 'microwave-radio-system' OPTICAL_ROADM = 'optical-roadm' - OPTICAL_TRANDPONDER = 'optical-trandponder' + OPTICAL_TRANSPONDER = 'optical-transponder' OPEN_LINE_SYSTEM = 'open-line-system' PACKET_ROUTER = 'packet-router' PACKET_SWITCH = 'packet-switch' diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py index 41d58caa4..daa9f4fe3 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/Constants.py @@ -17,6 +17,8 @@ DEFAULT_ADDRESS_FAMILIES = ['IPV4'] DEFAULT_BGP_AS = 65000 DEFAULT_BGP_ROUTE_TARGET = '{:d}:{:d}'.format(DEFAULT_BGP_AS, 333) +# TODO: improve definition of bearer mappings + # Bearer mappings: # device_uuid:endpoint_uuid => ( # device_uuid, endpoint_uuid, router_id, route_distinguisher, sub_if_index, address_ip, address_prefix) @@ -56,8 +58,9 @@ BEARER_MAPPINGS = { #'R4@D2:3/3': ('R4@D2', '3/3', '10.0.2.4', '65002:104', 100, '2.4.3.3', 24), # ECOC'22 - 'DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', '10.0.1.101', '65000:101', 300, None, None), - 'DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', '10.0.2.101', '65000:102', 300, None, None), - 'DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', '10.0.1.102', '65000:103', 300, None, None), - 'DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', '10.0.2.102', '65000:104', 300, None, None), + # bearer_ref => device_uuid, endpoint_uuid, sub_if_index, router_id, remote_router, circuit_id + 'DC1-GW:CS1-GW1': ('CS1-GW1', '10/1', 0, '5.5.1.1', '5.5.2.1', 111), + 'DC1-GW:CS1-GW2': ('CS1-GW2', '10/1', 0, '5.5.1.2', '5.5.2.2', 222), + 'DC2-GW:CS2-GW1': ('CS2-GW1', '10/1', 0, '5.5.2.1', '5.5.1.1', 111), + 'DC2-GW:CS2-GW2': ('CS2-GW2', '10/1', 0, '5.5.2.2', '5.5.1.2', 222), } diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py 
b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py index 9e4527f80..224ebf094 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Service.py @@ -91,7 +91,10 @@ class L2VPN_Service(Resource): response.status_code = HTTP_NOCONTENT return response - raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) + LOGGER.warning('VPN({:s}) not found in database. Nothing done.'.format(str(vpn_id))) + response = jsonify({}) + response.status_code = HTTP_NOCONTENT + return response except Exception as e: # pylint: disable=broad-except LOGGER.exception('Something went wrong Deleting VPN({:s})'.format(str(vpn_id))) response = jsonify({'error': str(e)}) diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py index 7b959b289..50b1c2abb 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py +++ b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_Services.py @@ -44,7 +44,11 @@ class L2VPN_Services(Resource): vpn_services : List[Dict] = request_data['ietf-l2vpn-svc:vpn-service'] for vpn_service in vpn_services: try: - vpn_service_type = vpn_service['vpn-svc-type'] + # By now, assume requests from OSM always need transport slices + # TODO: think how to differentiate + #vpn_service_type = vpn_service['vpn-svc-type'] + vpn_service_type = 'vpls' + if vpn_service_type == 'vpws': # pylint: disable=no-member service_request = Service() diff --git a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index 2e0900534..0dea17697 100644 --- a/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ 
b/src/compute/service/rest_server/nbi_plugins/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -65,7 +65,9 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s if mapping is None: msg = 'Specified Bearer({:s}) is not configured.' raise Exception(msg.format(str(bearer_reference))) - device_uuid,endpoint_uuid,router_id,route_distinguisher,sub_if_index,address_ip,address_prefix = mapping + #device_uuid,endpoint_uuid,router_id,route_dist,sub_if_index,address_ip,address_prefix = mapping + route_dist, address_ip, address_prefix = None, None, None + device_uuid, endpoint_uuid, sub_if_index, router_id, remote_router, circuit_id = mapping target : Union[Service, Slice, None] = None if target is None: target = get_slice (context_client, vpn_id) @@ -88,20 +90,21 @@ def process_site_network_access(context_client : ContextClient, site_id : str, s service_settings_key = '/settings' update_config_rule_custom(config_rules, service_settings_key, { 'mtu' : (DEFAULT_MTU, True), - 'address_families': (DEFAULT_ADDRESS_FAMILIES, True), - 'bgp_as' : (DEFAULT_BGP_AS, True), - 'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True), + #'address_families': (DEFAULT_ADDRESS_FAMILIES, True), + #'bgp_as' : (DEFAULT_BGP_AS, True), + #'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True), }) endpoint_settings_key = '/device[{:s}]/endpoint[{:s}]/settings'.format(device_uuid, endpoint_uuid) - field_updates = { - 'router_id' : (router_id, True), - 'route_distinguisher': (route_distinguisher, True), - 'sub_interface_index': (sub_if_index, True), - 'vlan_id' : (cvlan_id, True), - } - if address_ip is not None: field_updates['address_ip' ] = (address_ip, True) - if address_prefix is not None: field_updates['address_prefix' ] = (address_prefix, True) + field_updates = {} + if router_id is not None: field_updates['router_id' ] = (router_id, True) + if route_dist is not None: field_updates['route_distinguisher'] = (route_dist, True) + if sub_if_index is not None: 
field_updates['sub_interface_index'] = (sub_if_index, True) + if cvlan_id is not None: field_updates['vlan_id' ] = (cvlan_id, True) + if address_ip is not None: field_updates['address_ip' ] = (address_ip, True) + if address_prefix is not None: field_updates['address_prefix' ] = (address_prefix, True) + if remote_router is not None: field_updates['remote_router' ] = (remote_router, True) + if circuit_id is not None: field_updates['circuit_id' ] = (circuit_id, True) update_config_rule_custom(config_rules, endpoint_settings_key, field_updates) field_updates = {} diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index 6188a385d..75e315b37 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -31,6 +31,7 @@ DRIVERS = [ FilterFieldEnum.DEVICE_TYPE: [ DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, DeviceTypeEnum.EMULATED_PACKET_ROUTER, + DeviceTypeEnum.PACKET_ROUTER, # temporal ECOC'22 ], FilterFieldEnum.DRIVER : [ ORM_DeviceDriverEnum.UNDEFINED, @@ -39,13 +40,13 @@ DRIVERS = [ ], } ]), - (OpenConfigDriver, [ - { - # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG, - } - ]), + #(OpenConfigDriver, [ # temporal ECOC'22 + # { + # # Real Packet Router, specifying OpenConfig Driver => use OpenConfigDriver + # FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.PACKET_ROUTER, + # FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.OPENCONFIG, + # } + #]), (TransportApiDriver, [ { # Real OLS, specifying TAPI Driver => use TransportApiDriver @@ -53,17 +54,17 @@ DRIVERS = [ FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.TRANSPORT_API, } ]), - (P4Driver, [ - { - # Real P4 Switch, specifying P4 Driver => use P4Driver - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4, - } - ]), - (IETFApiDriver, [ - { - 
FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM, - FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, - } - ]), + #(P4Driver, [ # temporal ECOC'22 + # { + # # Real P4 Switch, specifying P4 Driver => use P4Driver + # FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, + # FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.P4, + # } + #]), + #(IETFApiDriver, [ # temporal ECOC'22 + # { + # FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.MICROVAWE_RADIO_SYSTEM, + # FilterFieldEnum.DRIVER : ORM_DeviceDriverEnum.IETF_NETWORK_TOPOLOGY, + # } + #]), ] diff --git a/src/device/service/drivers/openconfig/OpenConfigDriver.py b/src/device/service/drivers/openconfig/OpenConfigDriver.py index dd41096ec..c35ae9b9d 100644 --- a/src/device/service/drivers/openconfig/OpenConfigDriver.py +++ b/src/device/service/drivers/openconfig/OpenConfigDriver.py @@ -66,6 +66,7 @@ class NetconfSessionHandler: self.__look_for_keys = settings.get('look_for_keys', True) self.__allow_agent = settings.get('allow_agent', True) self.__force_running = settings.get('force_running', False) + self.__commit_per_delete = settings.get('delete_rule', False) self.__device_params = settings.get('device_params', {}) self.__manager_params = settings.get('manager_params', {}) self.__nc_params = settings.get('nc_params', {}) @@ -90,6 +91,9 @@ class NetconfSessionHandler: @property def use_candidate(self): return self.__candidate_supported and not self.__force_running + @property + def commit_per_rule(self): return self.__commit_per_delete + @RETRY_DECORATOR def get(self, filter=None, with_defaults=None): # pylint: disable=redefined-builtin with self.__lock: @@ -181,8 +185,8 @@ def do_sampling(samples_cache : SamplesCache, resource_key : str, out_samples : LOGGER.exception('Error retrieving samples') def edit_config( - netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, target='running', - default_operation='merge', test_option=None, 
error_option=None, format='xml' # pylint: disable=redefined-builtin + netconf_handler : NetconfSessionHandler, resources : List[Tuple[str, Any]], delete=False, commit_per_rule= False, + target='running', default_operation='merge', test_option=None, error_option=None, format='xml' # pylint: disable=redefined-builtin ): str_method = 'DeleteConfig' if delete else 'SetConfig' LOGGER.info('[{:s}] resources = {:s}'.format(str_method, str(resources))) @@ -202,6 +206,8 @@ def edit_config( netconf_handler.edit_config( config=str_config_message, target=target, default_operation=default_operation, test_option=test_option, error_option=error_option, format=format) + if commit_per_rule: + netconf_handler.commit() results[i] = True except Exception as e: # pylint: disable=broad-except str_operation = 'preparing' if target == 'candidate' else ('deleting' if delete else 'setting') @@ -278,12 +284,15 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - results = edit_config(self.__netconf_handler, resources, target='candidate') - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + if self.__netconf_handler.commit_per_rule: + results = edit_config(self.__netconf_handler, resources, target='candidate', commit_per_rule= True) + else: + results = edit_config(self.__netconf_handler, resources, target='candidate') + try: + self.__netconf_handler.commit() + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('[SetConfig] Exception commiting resources: {:s}'.format(str(resources))) + results = [e for _ in resources] # if commit fails, set exception in each resource else: results = edit_config(self.__netconf_handler, resources) return results @@ -294,12 
+303,15 @@ class OpenConfigDriver(_Driver): with self.__lock: if self.__netconf_handler.use_candidate: with self.__netconf_handler.locked(target='candidate'): - results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) - try: - self.__netconf_handler.commit() - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources))) - results = [e for _ in resources] # if commit fails, set exception in each resource + if self.__netconf_handler.commit_per_rule: + results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True, commit_per_rule= True) + else: + results = edit_config(self.__netconf_handler, resources, target='candidate', delete=True) + try: + self.__netconf_handler.commit() + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('[DeleteConfig] Exception commiting resources: {:s}'.format(str(resources))) + results = [e for _ in resources] # if commit fails, set exception in each resource else: results = edit_config(self.__netconf_handler, resources, delete=True) return results diff --git a/src/device/service/drivers/openconfig/templates/EndPoints.py b/src/device/service/drivers/openconfig/templates/EndPoints.py index c11b1669d..718a02d19 100644 --- a/src/device/service/drivers/openconfig/templates/EndPoints.py +++ b/src/device/service/drivers/openconfig/templates/EndPoints.py @@ -20,7 +20,7 @@ from .Tools import add_value_from_collection, add_value_from_tag LOGGER = logging.getLogger(__name__) -XPATH_PORTS = "//ocp:components/ocp:component/ocp:state[ocp:type='PORT']/.." 
+XPATH_PORTS = "//ocp:components/ocp:component" XPATH_IFACE_COUNTER = "//oci:interfaces/oci:interface[oci:name='{:s}']/state/counters/{:s}" def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: @@ -28,6 +28,13 @@ def parse(xml_data : ET.Element) -> List[Tuple[str, Dict[str, Any]]]: for xml_component in xml_data.xpath(XPATH_PORTS, namespaces=NAMESPACES): #LOGGER.info('xml_component = {:s}'.format(str(ET.tostring(xml_component)))) + component_type = xml_component.find('ocp:state/ocp:type', namespaces=NAMESPACES) + if component_type is None or component_type.text is None: continue + component_type = component_type.text + if component_type not in {'PORT', 'oc-platform-types:PORT'}: continue + + LOGGER.info('PORT xml_component = {:s}'.format(str(ET.tostring(xml_component)))) + endpoint = {} component_name = xml_component.find('ocp:name', namespaces=NAMESPACES) diff --git a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py index 70b50dae5..76b49bc8b 100644 --- a/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py +++ b/src/pathcomp/frontend/service/algorithms/KDisjointPathAlgorithm.py @@ -194,9 +194,13 @@ class KDisjointPathAlgorithm(_Algorithm): grpc_services[service_key] = self.add_service_to_reply(reply, context_uuid, service_uuid) for num_path,service_path_ero in enumerate(paths): + self.logger.warning('num_path={:d}'.format(num_path)) + self.logger.warning('service_path_ero={:s}'.format(str(service_path_ero))) if service_path_ero is None: continue path_hops = eropath_to_hops(service_path_ero, self.endpoint_to_link_dict) + self.logger.warning('path_hops={:s}'.format(str(path_hops))) connections = convert_explicit_path_hops_to_connections(path_hops, self.device_dict, service_uuid) + self.logger.warning('connections={:s}'.format(str(connections))) for connection in connections: connection_uuid,device_layer,path_hops,_ = connection @@ -221,8 +225,8 @@ 
class KDisjointPathAlgorithm(_Algorithm): grpc_connection = self.add_connection_to_reply(reply, connection_uuid, grpc_service, path_hops) grpc_connections[connection_uuid] = grpc_connection - for service_uuid in dependencies: - sub_service_key = (context_uuid, service_uuid) + for sub_service_uuid in dependencies: + sub_service_key = (context_uuid, sub_service_uuid) grpc_sub_service = grpc_services.get(sub_service_key) if grpc_sub_service is None: raise Exception('Service({:s}) not found'.format(str(sub_service_key))) diff --git a/src/pathcomp/frontend/service/algorithms/_Algorithm.py b/src/pathcomp/frontend/service/algorithms/_Algorithm.py index bb96ff354..b798813a8 100644 --- a/src/pathcomp/frontend/service/algorithms/_Algorithm.py +++ b/src/pathcomp/frontend/service/algorithms/_Algorithm.py @@ -12,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, requests, uuid +import json, logging, requests from typing import Dict, List, Optional, Tuple -from common.proto.context_pb2 import Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, ServiceTypeEnum +from common.proto.context_pb2 import ( + ConfigRule, Connection, Device, DeviceList, EndPointId, Link, LinkList, Service, ServiceStatusEnum, + ServiceTypeEnum) from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest -from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.ConfigRule import json_config_rule_set from pathcomp.frontend.Config import BACKEND_URL from pathcomp.frontend.service.algorithms.tools.ConstantsMappings import DEVICE_LAYER_TO_SERVICE_TYPE, DeviceLayerEnum from .tools.EroPathToHops import eropath_to_hops @@ -156,6 +158,17 @@ class _Algorithm: raise Exception(MSG.format(str(device_layer))) service.service_type = service_type + if service_type == ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE: + json_tapi_settings = { + 
'capacity_value' : 50.0, + 'capacity_unit' : 'GHz', + 'layer_proto_name': 'PHOTONIC_MEDIA', + 'layer_proto_qual': 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC', + 'direction' : 'UNIDIRECTIONAL', + } + config_rule = ConfigRule(**json_config_rule_set('/settings', json_tapi_settings)) + service.service_config.config_rules.append(config_rule) + service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED if path_hops is not None and len(path_hops) > 0: diff --git a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py index 5e4f54083..2ff97b96c 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ConstantsMappings.py @@ -92,7 +92,7 @@ DEVICE_TYPE_TO_LAYER = { DeviceTypeEnum.OPEN_LINE_SYSTEM.value : DeviceLayerEnum.OPTICAL_CONTROLLER, DeviceTypeEnum.OPTICAL_ROADM.value : DeviceLayerEnum.OPTICAL_DEVICE, - DeviceTypeEnum.OPTICAL_TRANDPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE, + DeviceTypeEnum.OPTICAL_TRANSPONDER.value : DeviceLayerEnum.OPTICAL_DEVICE, } DEVICE_LAYER_TO_SERVICE_TYPE = { diff --git a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py index be0f1fda5..d173f3f27 100644 --- a/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py +++ b/src/service/service/service_handlers/l2nm_emulated/ConfigRules.py @@ -13,7 +13,6 @@ # limitations under the License. 
from typing import Dict, List -from common.proto.context_pb2 import EndPointId, Service from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from service.service.service_handler_api.AnyTreeTools import TreeNode @@ -22,136 +21,57 @@ def setup_config_rules( service_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: - connection_short_uuid = connection_uuid.split('-')[-1] - network_instance_name = '{:s}-NetInst'.format(connection_short_uuid) - network_interface_desc = '{:s}-NetIf'.format(connection_uuid) - network_subinterface_desc = '{:s}-NetSubIf'.format(connection_uuid) + json_settings : Dict = {} if service_settings is None else service_settings.value + json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value - if service_settings is None: - # MSG = 'Unable to retrieve settings for Service({:s})' - #raise Exception(MSG.format(connection_uuid)) - mtu = 1450 - bgp_as = 0 - bgp_route_target = '0:0' - else: - json_settings : Dict = service_settings.value - mtu = json_settings.get('mtu', 1450 ) # 1512 - #address_families = json_settings.get('address_families', [] ) # ['IPV4'] - bgp_as = json_settings.get('bgp_as', 0 ) # 65000 - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + #bgp_as = json_settings.get('bgp_as', 0 ) # 65000 + #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - if endpoint_settings is None: - #MSG = 'Unable to retrieve settings for device({:s}/endpoint({:s}) in service({:s})' - #raise Exception(MSG.format(device_uuid, endpoint_uuid, connection_uuid)) - route_distinguisher = '0:0' - sub_interface_index = 0 - vlan_id = 1 - address_ip = '0.0.0.0' - address_prefix = 24 - if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) - else: - json_endpoint_settings : Dict = 
endpoint_settings.value - #router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' - route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 - vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 - address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' - address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 - if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' + #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + remote_router = json_endpoint_settings.get('remote_router', '0.0.0.0') # '5.5.5.5' + circuit_id = json_endpoint_settings.get('circuit_id', '000' ) # '111' + + if_cirid_name = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id)) + network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id)) + connection_point_id = 'VC-1' json_config_rules = [ json_config_rule_set( - '/network_instance[{:s}]'.format(network_instance_name), { - 'name': network_instance_name, 'description': network_interface_desc, 'type': 'L3VRF', - 'route_distinguisher': route_distinguisher, - #'router_id': router_id, 'address_families': address_families, - }), - json_config_rule_set( - '/interface[{:s}]'.format(endpoint_uuid), { - 'name': endpoint_uuid, 'description': network_interface_desc, 'mtu': mtu, - }), - json_config_rule_set( - '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { - 'name': endpoint_uuid, 'index': sub_interface_index, - 'description': 
network_subinterface_desc, 'vlan_id': vlan_id, - 'address_ip': address_ip, 'address_prefix': address_prefix, - }), - json_config_rule_set( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { - 'name': network_instance_name, 'id': if_subif_name, 'interface': endpoint_uuid, - 'subinterface': sub_interface_index, - }), - json_config_rule_set( - '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { - 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', 'as': bgp_as, - }), - json_config_rule_set( - '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', - }), - json_config_rule_set( - '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( - network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', #'default_import_policy': 'REJECT_ROUTE', - }), - json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - }), - json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_set( - '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { - 'policy_name': '{:s}_import'.format(network_instance_name), - }), - json_config_rule_set( - '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': 
'{:s}_import'.format(network_instance_name), 'statement_name': '3', - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', - }), + '/network_instance[default]', + {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}), + json_config_rule_set( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, 'import_policy': '{:s}_import'.format(network_instance_name), - }), + '/network_instance[default]/protocols[OSPF]', + {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}), + json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - }), + '/network_instance[default]/protocols[STATIC]', + {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}), + json_config_rule_set( - '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), + '/network_instance[{:s}]'.format(network_instance_name), + {'name': network_instance_name, 'type': 'L2VSI'}), + json_config_rule_set( - '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { - 'policy_name': '{:s}_export'.format(network_instance_name), - }), + '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index), + {'name': if_cirid_name, 'type': 'l2vlan', 'index': sub_interface_index, 'vlan_id': vlan_id}), + json_config_rule_set( - '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': 
'{:s}_export'.format(network_instance_name), 'statement_name': '3', - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'match_set_options': 'ANY', 'policy_result': 'ACCEPT_ROUTE', - }), + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), + {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}), + json_config_rule_set( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, 'export_policy': '{:s}_export'.format(network_instance_name), - }), + '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), + {'name': network_instance_name, 'connection_point': connection_point_id, 'VC_ID': circuit_id, + 'remote_system': remote_router}), ] - return json_config_rules def teardown_config_rules( @@ -159,110 +79,54 @@ def teardown_config_rules( service_settings : TreeNode, endpoint_settings : TreeNode ) -> List[Dict]: - connection_short_uuid = connection_uuid.split('-')[-1] - network_instance_name = '{:s}-NetInst'.format(connection_short_uuid) + json_settings : Dict = {} if service_settings is None else service_settings.value + json_endpoint_settings : Dict = {} if endpoint_settings is None else endpoint_settings.value - if service_settings is None: - # MSG = 'Unable to retrieve settings for Service({:s})' - #raise Exception(MSG.format(connection_uuid)) - bgp_route_target = '0:0' - else: - json_settings : Dict = service_settings.value - bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 + mtu = json_settings.get('mtu', 1450 ) # 1512 + #address_families = json_settings.get('address_families', [] ) # ['IPV4'] + #bgp_as = json_settings.get('bgp_as', 0 ) # 65000 + #bgp_route_target = json_settings.get('bgp_route_target', '0:0') # 65000:333 - if 
endpoint_settings is None: - #MSG = 'Unable to retrieve settings for device({:s}/endpoint({:s}) in service({:s})' - #raise Exception(MSG.format(device_uuid, endpoint_uuid, connection_uuid)) - sub_interface_index = 0 - vlan_id = 1 - if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) - else: - json_endpoint_settings : Dict = endpoint_settings.value - sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 - vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 - if_subif_name = '{:s}.{:d}'.format(endpoint_uuid, vlan_id) + router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10' + #route_distinguisher = json_endpoint_settings.get('route_distinguisher', '0:0' ) # '60001:801' + sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1 + vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400 + #address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1' + #address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30 + remote_router = json_endpoint_settings.get('remote_router', '0.0.0.0') # '5.5.5.5' + circuit_id = json_endpoint_settings.get('circuit_id', '000' ) # '111' + + if_cirid_name = '{:s}.{:s}'.format(endpoint_uuid, str(circuit_id)) + network_instance_name = 'ELAN-AC:{:s}'.format(str(circuit_id)) + connection_point_id = 'VC-1' json_config_rules = [ json_config_rule_delete( - '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_subif_name), { - 'name': network_instance_name, 'id': if_subif_name, - }), - json_config_rule_delete( - '/interface[{:s}]/subinterface[{:d}]'.format(endpoint_uuid, sub_interface_index), { - 'name': endpoint_uuid, 'index': sub_interface_index, - }), - json_config_rule_delete( - '/interface[{:s}]'.format(endpoint_uuid), { - 'name': endpoint_uuid, - }), - json_config_rule_delete( - '/network_instance[{:s}]/table_connections[DIRECTLY_CONNECTED][BGP][IPV4]'.format( - network_instance_name), { - 'name': 
network_instance_name, 'src_protocol': 'DIRECTLY_CONNECTED', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', - }), - json_config_rule_delete( - '/network_instance[{:s}]/table_connections[STATIC][BGP][IPV4]'.format(network_instance_name), { - 'name': network_instance_name, 'src_protocol': 'STATIC', 'dst_protocol': 'BGP', - 'address_family': 'IPV4', - }), - json_config_rule_delete( - '/network_instance[{:s}]/protocols[BGP]'.format(network_instance_name), { - 'name': network_instance_name, 'identifier': 'BGP', 'protocol_name': 'BGP', - }), - json_config_rule_delete( - # pylint: disable=duplicate-string-formatting-argument - '/network_instance[{:s}]/inter_instance_policies[{:s}_import]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_import]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_import'.format(network_instance_name), 'statement_name': '3', - }), - json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_import]'.format(network_instance_name), { - 'policy_name': '{:s}_import'.format(network_instance_name), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_import][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), - json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_import]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_import'.format(network_instance_name), - }), + '/network_instance[{:s}]/connection_point[{:s}]'.format(network_instance_name, connection_point_id), + {'name': network_instance_name, 'connection_point': connection_point_id}), + json_config_rule_delete( - # pylint: disable=duplicate-string-formatting-argument - 
'/network_instance[{:s}]/inter_instance_policies[{:s}_export]'.format( - network_instance_name, network_instance_name), { - 'name': network_instance_name, - }), + '/network_instance[{:s}]/interface[{:s}]'.format(network_instance_name, if_cirid_name), + {'name': network_instance_name, 'id': if_cirid_name, 'interface': if_cirid_name, 'subinterface': 0}), + json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_export]/statement[{:s}]'.format( - network_instance_name, '3'), { - 'policy_name': '{:s}_export'.format(network_instance_name), 'statement_name': '3', - }), + '/interface[{:s}]/subinterface[0]'.format(if_cirid_name, sub_interface_index), + {'name': if_cirid_name, 'index': sub_interface_index}), + json_config_rule_delete( - '/routing_policy/policy_definition[{:s}_export]'.format(network_instance_name), { - 'policy_name': '{:s}_export'.format(network_instance_name), - }), + '/network_instance[{:s}]'.format(network_instance_name), + {'name': network_instance_name}), + json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_export][route-target:{:s}]'.format( - network_instance_name, bgp_route_target), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - 'ext_community_member' : 'route-target:{:s}'.format(bgp_route_target), - }), + '/network_instance[default]/protocols[STATIC]', + {'name': 'default', 'identifier': 'STATIC', 'protocol_name': 'STATIC'}), + json_config_rule_delete( - '/routing_policy/bgp_defined_set[{:s}_rt_export]'.format(network_instance_name), { - 'ext_community_set_name': '{:s}_rt_export'.format(network_instance_name), - }), + '/network_instance[default]/protocols[OSPF]', + {'name': 'default', 'identifier': 'OSPF', 'protocol_name': 'OSPF'}), + json_config_rule_delete( - '/network_instance[{:s}]'.format(network_instance_name), { - 'name': network_instance_name - }), + '/network_instance[default]', + {'name': 'default', 'type': 'DEFAULT_INSTANCE', 'router_id': router_id}), ] return json_config_rules 
diff --git a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py index 1249af0ae..aeba6a26a 100644 --- a/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py +++ b/src/service/service/service_handlers/tapi_tapi/TapiServiceHandler.py @@ -14,58 +14,54 @@ import anytree, json, logging from typing import Any, Dict, List, Optional, Tuple, Union -from common.orm.Database import Database from common.orm.HighLevel import get_object -from common.orm.backend.Tools import key_to_str -from common.proto.context_pb2 import Device +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, Device, DeviceId, Service from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type -from context.client.ContextClient import ContextClient -from device.client.DeviceClient import DeviceClient -from service.service.database.ConfigModel import ORM_ConfigActionEnum, get_config_rules -from service.service.database.ContextModel import ContextModel from service.service.database.DeviceModel import DeviceModel -from service.service.database.ServiceModel import ServiceModel from service.service.service_handler_api._ServiceHandler import _ServiceHandler from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value +from service.service.task_scheduler.TaskExecutor import TaskExecutor LOGGER = logging.getLogger(__name__) class TapiServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called - self, db_service : ServiceModel, database : Database, context_client : ContextClient, - device_client : DeviceClient, **settings + self, service : Service, task_executor : TaskExecutor, **settings ) -> None: - self.__db_service = db_service - self.__database = database - 
self.__context_client = context_client # pylint: disable=unused-private-member - self.__device_client = device_client - - self.__db_context : ContextModel = get_object(self.__database, ContextModel, self.__db_service.context_fk) - str_service_key = key_to_str([self.__db_context.context_uuid, self.__db_service.service_uuid]) - db_config = get_config_rules(self.__database, str_service_key, 'running') + self.__service = service + self.__task_executor = task_executor # pylint: disable=unused-private-member self.__resolver = anytree.Resolver(pathattr='name') self.__config = TreeNode('.') - for action, resource_key, resource_value in db_config: - if action == ORM_ConfigActionEnum.SET: + for config_rule in service.service_config.config_rules: + action = config_rule.action + if config_rule.WhichOneof('config_rule') != 'custom': continue + resource_key = config_rule.custom.resource_key + resource_value = config_rule.custom.resource_value + if action == ConfigActionEnum.CONFIGACTION_SET: try: resource_value = json.loads(resource_value) except: # pylint: disable=bare-except pass set_subnode_value(self.__resolver, self.__config, resource_key, resource_value) - elif action == ORM_ConfigActionEnum.DELETE: + elif action == ConfigActionEnum.CONFIGACTION_DELETE: delete_subnode(self.__resolver, self.__config, resource_key) - def SetEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + LOGGER.info('[SetEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.info('[SetEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) chk_type('endpoints', endpoints, list) if len(endpoints) != 2: return [] - service_uuid = self.__db_service.service_uuid - service_settings : TreeNode = get_subnode(self.__resolver, self.__config, 'settings', None) - if service_settings is None: raise 
Exception('Unable to settings for Service({:s})'.format(str(service_uuid))) + service_uuid = self.__service.service_id.service_uuid.uuid + settings : TreeNode = get_subnode(self.__resolver, self.__config, '/settings', None) + if settings is None: raise Exception('Unable to retrieve settings for Service({:s})'.format(str(service_uuid))) - json_settings : Dict = service_settings.value - capacity_value = json_settings.get('capacity_value', 1) + json_settings : Dict = settings.value + capacity_value = json_settings.get('capacity_value', 50.0) capacity_unit = json_settings.get('capacity_unit', 'GHz') layer_proto_name = json_settings.get('layer_proto_name', 'PHOTONIC_MEDIA') layer_proto_qual = json_settings.get('layer_proto_qual', 'tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC') @@ -74,46 +70,44 @@ class TapiServiceHandler(_ServiceHandler): results = [] try: device_uuid = endpoints[0][0] - db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True) - json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True) - json_device_config : Dict = json_device.setdefault('device_config', {}) - json_device_config_rules : List = json_device_config.setdefault('config_rules', []) - json_device_config_rules.extend([ - json_config_rule_set('/service[{:s}]'.format(service_uuid), { - 'uuid' : service_uuid, - 'input_sip' : endpoints[0][1], - 'output_sip' : endpoints[1][1], - 'capacity_unit' : capacity_unit, - 'capacity_value' : capacity_value, - 'layer_protocol_name' : layer_proto_name, - 'layer_protocol_qualifier': layer_proto_qual, - 'direction' : direction, - }), - ]) - self.__device_client.ConfigureDevice(Device(**json_device)) + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + json_config_rule = json_config_rule_set('/service[{:s}]'.format(service_uuid), { + 'uuid' : service_uuid, + 'input_sip' : endpoints[0][1], + 'output_sip' : endpoints[1][1], + 
'capacity_unit' : capacity_unit, + 'capacity_value' : capacity_value, + 'layer_protocol_name' : layer_proto_name, + 'layer_protocol_qualifier': layer_proto_qual, + 'direction' : direction, + }) + del device.device_config.config_rules[:] + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) results.append(True) except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Unable to SetEndpoint for Service({:s})'.format(str(service_uuid))) + LOGGER.exception('Unable to configure Service({:s})'.format(str(service_uuid))) results.append(e) return results - def DeleteEndpoint(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> List[Union[bool, Exception]]: + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + LOGGER.info('[DeleteEndpoint] endpoints={:s}'.format(str(endpoints))) + LOGGER.info('[DeleteEndpoint] connection_uuid={:s}'.format(str(connection_uuid))) + chk_type('endpoints', endpoints, list) if len(endpoints) != 2: return [] - service_uuid = self.__db_service.service_uuid + service_uuid = self.__service.service_id.service_uuid.uuid results = [] try: device_uuid = endpoints[0][0] - db_device : DeviceModel = get_object(self.__database, DeviceModel, device_uuid, raise_if_not_found=True) - json_device = db_device.dump(include_config_rules=False, include_drivers=True, include_endpoints=True) - json_device_config : Dict = json_device.setdefault('device_config', {}) - json_device_config_rules : List = json_device_config.setdefault('config_rules', []) - json_device_config_rules.extend([ - json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid}) - ]) - self.__device_client.ConfigureDevice(Device(**json_device)) + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + json_config_rule = 
json_config_rule_delete('/service[{:s}]'.format(service_uuid), {'uuid': service_uuid}) + device.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device) results.append(True) except Exception as e: # pylint: disable=broad-except LOGGER.exception('Unable to DeleteEndpoint for Service({:s})'.format(str(service_uuid))) diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index 9f102f558..1df13a0f4 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -147,6 +147,7 @@ class TasksScheduler: if isinstance(item, Service): str_item_key = grpc_message_to_json_string(item.service_id) + LOGGER.info('Exploring Service: {:s}'.format(str_item_key)) if str_item_key in explored_items: continue include_service(item.service_id) @@ -160,6 +161,7 @@ class TasksScheduler: elif isinstance(item, ServiceId): str_item_key = grpc_message_to_json_string(item) + LOGGER.info('Exploring ServiceId: {:s}'.format(str_item_key)) if str_item_key in explored_items: continue include_service(item) @@ -173,16 +175,22 @@ class TasksScheduler: elif isinstance(item, Connection): str_item_key = grpc_message_to_json_string(item.connection_id) + LOGGER.info('Exploring Connection: {:s}'.format(str_item_key)) if str_item_key in explored_items: continue connection_key = include_connection(item.connection_id, item.service_id) self._add_connection_to_executor_cache(connection) + + #_,service_key_done = include_service(item.service_id) self._executor.get_service(item.service_id) + #self._dag.add(service_key_done, connection_key) pending_items_to_explore.put(item.service_id) + for sub_service_id in connection.sub_service_ids: _,service_key_done = include_service(sub_service_id) self._executor.get_service(sub_service_id) - self._dag.add(connection_key, service_key_done) + self._dag.add(service_key_done, connection_key) + 
pending_items_to_explore.put(sub_service_id) explored_items.add(str_item_key) @@ -200,8 +208,10 @@ class TasksScheduler: results = [] for task_key in ordered_task_keys: + LOGGER.info('Task {:s} - begin'.format(str(task_key))) task = self._tasks.get(task_key) succeeded = True if dry_run else task.execute() + LOGGER.info('Task {:s} - succeeded={:s}'.format(str(task_key), str(succeeded))) results.append(succeeded) LOGGER.info('execute_all results={:s}'.format(str(results))) diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py index ea9692142..7a99ccbde 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py @@ -47,9 +47,10 @@ class Task_ConnectionConfigure(_Task): service_handler_settings = {} service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings) - connection_expander = ConnectionExpander() - traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection) - endpointids_to_set = endpointids_to_raw(traversed_endpoint_ids) + #connection_expander = ConnectionExpander() + #traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection) + #endpointids_to_set = endpointids_to_raw(traversed_endpoint_ids) + endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids) connection_uuid = connection.connection_id.connection_uuid.uuid results_setendpoint = service_handler.SetEndpoint(endpointids_to_set, connection_uuid=connection_uuid) diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py index fc849560e..bef5f85dd 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py @@ -47,9 
+47,10 @@ class Task_ConnectionDeconfigure(_Task): service_handler_settings = {} service_handler = self._task_executor.get_service_handler(connection, service, **service_handler_settings) - connection_expander = ConnectionExpander() - traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection) - endpointids_to_delete = endpointids_to_raw(traversed_endpoint_ids) + #connection_expander = ConnectionExpander() + #traversed_endpoint_ids = connection_expander.get_endpoints_traversed(connection) + #endpointids_to_delete = endpointids_to_raw(traversed_endpoint_ids) + endpointids_to_delete = endpointids_to_raw(connection.path_hops_endpoint_ids) connection_uuid = connection.connection_id.connection_uuid.uuid results_deleteendpoint = service_handler.DeleteEndpoint(endpointids_to_delete, connection_uuid=connection_uuid) diff --git a/src/tests/ecoc22/deploy_specs.sh b/src/tests/ecoc22/deploy_specs.sh index 03d17f428..cdcee08bb 100644 --- a/src/tests/ecoc22/deploy_specs.sh +++ b/src/tests/ecoc22/deploy_specs.sh @@ -2,7 +2,7 @@ export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/" # Set the list of components, separated by spaces, you want to build images for, and deploy. -export TFS_COMPONENTS="context device service pathcomp slice compute webui" # automation +export TFS_COMPONENTS="context device service automation pathcomp slice compute webui" # Set the tag you want to use for your images. 
export TFS_IMAGE_TAG="dev" @@ -11,7 +11,7 @@ export TFS_IMAGE_TAG="dev" export TFS_K8S_NAMESPACE="tfs" # Set additional manifest files to be applied after the deployment -export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" +export TFS_EXTRA_MANIFESTS="manifests/cttc-ols/cttc-ols.yaml manifests/nginx_ingress_http.yaml" # Set the neew Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" diff --git a/src/webui/service/templates/base.html b/src/webui/service/templates/base.html index 5d7801d11..9804e4afd 100644 --- a/src/webui/service/templates/base.html +++ b/src/webui/service/templates/base.html @@ -83,9 +83,9 @@ Slice {% endif %} -