diff --git a/proto/context.proto b/proto/context.proto index 3d2789ea74f78b49b95ed8130b9fe406e762eed2..3b5604f97e4e62b8a4848f7cae278f7382e0a43a 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -231,6 +231,7 @@ enum DeviceDriverEnum { DEVICEDRIVER_NCE = 15; DEVICEDRIVER_SMARTNIC = 16; DEVICEDRIVER_MORPHEUS = 17; + DEVICEDRIVER_RYU = 18; } enum DeviceOperationalStatusEnum { diff --git a/src/common/DeviceTypes.py b/src/common/DeviceTypes.py index 2b8f7e3dc8963b8f9137a7e811ea608dfc8ff0ab..933dbad2fae5daba8e9110a894eb784653b0ed9d 100644 --- a/src/common/DeviceTypes.py +++ b/src/common/DeviceTypes.py @@ -52,6 +52,7 @@ class DeviceTypeEnum(Enum): QKD_NODE = 'qkd-node' OPEN_ROADM = 'openroadm' MORPHEUS = 'morpheus' + OPENFLOW_RYU_CONTROLLER = 'openflow-ryu-controller' # ETSI TeraFlowSDN controller TERAFLOWSDN_CONTROLLER = 'teraflowsdn' diff --git a/src/context/service/database/models/enums/DeviceDriver.py b/src/context/service/database/models/enums/DeviceDriver.py index 380495f7083e5366a9367d0ceeb427b54cf9e0af..274c72b1b1c59ce8aaf915eb32e2ea3e56b67098 100644 --- a/src/context/service/database/models/enums/DeviceDriver.py +++ b/src/context/service/database/models/enums/DeviceDriver.py @@ -40,6 +40,7 @@ class ORM_DeviceDriverEnum(enum.Enum): QKD = DeviceDriverEnum.DEVICEDRIVER_QKD SMARTNIC = DeviceDriverEnum.DEVICEDRIVER_SMARTNIC MORPHEUS = DeviceDriverEnum.DEVICEDRIVER_MORPHEUS + RYU = DeviceDriverEnum.DEVICEDRIVER_RYU grpc_to_enum__device_driver = functools.partial( grpc_to_enum, DeviceDriverEnum, ORM_DeviceDriverEnum) diff --git a/src/device/service/Tools.py b/src/device/service/Tools.py index ee683853676b34f256ff335fffe3600823e2c070..229070fe2a089da69a2da94f249bd2905e8b916c 100644 --- a/src/device/service/Tools.py +++ b/src/device/service/Tools.py @@ -100,11 +100,6 @@ def check_no_endpoints(device_endpoints) -> None: def get_device_controller_uuid(device : Device) -> Optional[str]: controller_uuid = device.controller_id.device_uuid.uuid if len(controller_uuid) > 0: return controller_uuid - #for config_rule in device.device_config.config_rules: - # if config_rule.WhichOneof('config_rule') != 'custom': continue - # if config_rule.custom.resource_key != '_controller': continue - # device_controller_id = json.loads(config_rule.custom.resource_value) - # return device_controller_id['uuid'] return None def populate_endpoints( @@ -161,14 +156,18 @@ def populate_endpoints( _sub_device.device_type = resource_value['type'] _sub_device.device_operational_status = resource_value['status'] - # Sub-devices should not have a driver assigned. Instead, they should have - # a config rule specifying their controller. - #_sub_device.device_drivers.extend(resource_value['drivers']) # pylint: disable=no-member - #controller_config_rule = _sub_device.device_config.config_rules.add() - #controller_config_rule.action = ConfigActionEnum.CONFIGACTION_SET - #controller_config_rule.custom.resource_key = '_controller' - #controller = {'uuid': device_uuid, 'name': device_name} - #controller_config_rule.custom.resource_value = json.dumps(controller, indent=0, sort_keys=True) + # Sub-devices might not have a driver assigned. 
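+            # When present, 'drivers' may be either a single value or a collection.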
+ if 'drivers' in resource_value: + drivers = resource_value['drivers'] + if isinstance(drivers, (list, set)): + _sub_device.device_drivers.extend(drivers) # pylint: disable=no-member + elif isinstance(drivers, (int, str)): + _sub_device.device_drivers.append(drivers) # pylint: disable=no-member + else: + MSG = 'Unsupported drivers definition in sub-device({:s}, {:s})' + raise Exception(MSG.format(str(resource_key), str(resource_value))) + + # Sub-devices should always have a controller associated. _sub_device.controller_id.device_uuid.uuid = device_uuid new_sub_devices[_sub_device_uuid] = _sub_device diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index fec407df96c0ae8650baf8492a8795c732c5c104..5f3fb0b3bfc77885085eb65c58eae4aea61e8e13 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -193,6 +193,16 @@ if LOAD_ALL_DEVICE_DRIVERS: FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, } ])) +if LOAD_ALL_DEVICE_DRIVERS: + from .ryu.RyuDriver import RyuDriver + DRIVERS.append( + (RyuDriver, [ + { + FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER, + FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_RYU, + } + ]) + ) if LOAD_ALL_DEVICE_DRIVERS: from .xr.XrDriver import XrDriver # pylint: disable=wrong-import-position diff --git a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py index 6699134d3a8f96f1761fa07b97b99e3476719052..900d844765ea1fc1acec1c7996cc0e2a39f47045 100644 --- a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py @@ -45,6 +45,8 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_IETF_SLICE' : 14, 'DEVICEDRIVER_NCE' : 15, 'DEVICEDRIVER_SMARTNIC' : 16, + 'DEVICEDRIVER_MORPHEUS' : 17, + 'DEVICEDRIVER_RYU' : 18, } LOGGER = logging.getLogger(__name__) diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index 6efe3712f327af35a114434e09efa5d4996ee848..bf75d76505851b2389148127b73d4479f717056d 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -45,6 +45,9 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_IETF_L3VPN' : 13, 'DEVICEDRIVER_IETF_SLICE' : 14, 'DEVICEDRIVER_NCE' : 15, + 'DEVICEDRIVER_SMARTNIC' : 16, + 'DEVICEDRIVER_MORPHEUS' : 17, + 'DEVICEDRIVER_RYU' : 18, } LOGGER = logging.getLogger(__name__) diff --git a/src/device/service/drivers/optical_tfs/TfsApiClient.py b/src/device/service/drivers/optical_tfs/TfsApiClient.py index 49c5a9e4f07026d8bcd5851770e9b225ab37fe63..b8fd4c551982a8074b462c94ca0de79a2c6dfacb 100644 --- a/src/device/service/drivers/optical_tfs/TfsApiClient.py +++ b/src/device/service/drivers/optical_tfs/TfsApiClient.py @@ -52,6 +52,9 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_IETF_L3VPN' : 13, 'DEVICEDRIVER_IETF_SLICE' : 14, 'DEVICEDRIVER_NCE' : 15, + 'DEVICEDRIVER_SMARTNIC' : 16, + 'DEVICEDRIVER_MORPHEUS' : 17, + 'DEVICEDRIVER_RYU' : 18, } LOGGER = logging.getLogger(__name__) diff --git a/src/device/service/drivers/ryu/RyuApiClient.py b/src/device/service/drivers/ryu/RyuApiClient.py new file mode 100644 index 0000000000000000000000000000000000000000..ea73a4b7e13121792ad76619541b976ad45415bc --- /dev/null +++ b/src/device/service/drivers/ryu/RyuApiClient.py @@ -0,0 +1,241 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, requests +from requests.auth import HTTPBasicAuth +from typing import Dict, List, Optional, Union +from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum + + +CHECK_CRED_URL = '{:s}://{:s}:{:d}' +GET_DEVICES_URL = '{:s}://{:s}:{:d}/v1.0/topology/switches' +GET_LINKS_URL = '{:s}://{:s}:{:d}/v1.0/topology/links' +ADD_FLOW_ENTRY_URL = '{:s}://{:s}:{:d}/stats/flowentry/add' +DEL_FLOW_ENTRY_URL = '{:s}://{:s}:{:d}/stats/flowentry/delete_strict' + +TIMEOUT = 30 + +HTTP_OK_CODES = { + 200, # OK + 201, # Created + 202, # Accepted + 204, # No Content +} + +MSG_ERROR = 'Could not retrieve devices in remote Ryu instance({:s}). status_code={:s} reply={:s}' + +LOGGER = logging.getLogger(__name__) + +class RyuApiClient: + def __init__( + self, address : str, port : int, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : int = TIMEOUT + ) -> None: + self._check_cred_url = CHECK_CRED_URL .format(scheme, address, port) + self._get_devices_url = GET_DEVICES_URL .format(scheme, address, port) + self._get_links_url = GET_LINKS_URL .format(scheme, address, port) + self._add_flow_entry_url = ADD_FLOW_ENTRY_URL.format(scheme, address, port) + self._del_flow_entry_url = DEL_FLOW_ENTRY_URL.format(scheme, address, port) + self._auth = None if username is None or password is None else HTTPBasicAuth(username, password) + self._timeout = timeout + + def check_credentials(self) -> bool: + try: + response = requests.get(self._check_cred_url, timeout=self._timeout, verify=False, auth=self._auth) + response.raise_for_status() + return True + except requests.exceptions.Timeout: + LOGGER.exception('Timeout connecting to {:s}'.format(str(self._check_cred_url))) + return False + except requests.exceptions.RequestException as e: + LOGGER.exception('Exception connecting to {:s}'.format(str(self._check_cred_url))) + return False + + def get_devices_endpoints(self) -> List[Dict]: + LOGGER.debug('[get_devices_endpoints] begin') + + reply_switches = requests.get(self._get_devices_url, timeout=self._timeout, verify=False, auth=self._auth) + if reply_switches.status_code not in HTTP_OK_CODES: + msg = MSG_ERROR.format(str(self._get_devices_url), str(reply_switches.status_code), str(reply_switches)) + LOGGER.error(msg) + raise Exception(msg) + + json_reply_switches = reply_switches.json() + LOGGER.debug('[get_devices_endpoints] json_reply_switches={:s}'.format(json.dumps(json_reply_switches))) + + result = list() + for json_switch in json_reply_switches: + device_uuid : str = json_switch['dpid'] + device_url = '/devices/device[{:s}]'.format(device_uuid) + device_data = { + 'uuid': device_uuid, + 'name': device_uuid, + 'type': 'packet-switch', + 'status': DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED, + 'drivers': DeviceDriverEnum.DEVICEDRIVER_RYU, + } + result.append((device_url, device_data)) + + device_ports = json_switch.get('ports', []) + for port in device_ports: + port_name = port.get('name', '') + 
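+                # Port names (e.g. "s1-eth1") are used directly as endpoint UUIDs and names.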
+                endpoint_uuid = port_name
+                endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid)
+                endpoint_data = {
+                    'device_uuid': device_uuid,
+                    'uuid': port_name,
+                    'name': port_name,
+                    'type': 'copper',
+                }
+                result.append((endpoint_url, endpoint_data))
+
+        reply_links = requests.get(self._get_links_url, timeout=self._timeout, verify=False, auth=self._auth)
+        if reply_links.status_code not in HTTP_OK_CODES:
+            msg = MSG_ERROR.format(str(self._get_links_url), str(reply_links.status_code), str(reply_links))
+            LOGGER.error(msg)
+            raise Exception(msg)
+
+        json_reply_links = reply_links.json()
+        LOGGER.debug('[get_devices_endpoints] json_reply_links={:s}'.format(json.dumps(json_reply_links)))
+
+        for json_link in json_reply_links:
+            dpid_src = json_link.get('src', {}).get('dpid', '')
+            dpid_dst = json_link.get('dst', {}).get('dpid', '')
+            port_src_name = json_link.get('src', {}).get('name', '')
+            port_dst_name = json_link.get('dst', {}).get('name', '')
+            link_name = f"{dpid_src}/{port_src_name}=={dpid_dst}/{port_dst_name}"
+            link_uuid = f"{port_src_name}=={port_dst_name}"
+            link_endpoint_ids = [
+                (dpid_src, port_src_name),
+                (dpid_dst, port_dst_name),
+            ]
+
+            LOGGER.debug('link_endpoint_ids = {:s}'.format(str(link_endpoint_ids)))
+            link_url = '/links/link[{:s}]'.format(link_uuid)
+            link_data = {
+                'uuid': link_uuid,
+                'name': link_name,
+                'endpoints': link_endpoint_ids,
+            }
+            result.append((link_url, link_data))
+
+        LOGGER.debug('[get_devices_endpoints] topology retrieved; returning')
+        return result
+
+    def add_flow_rule(
+        self, dpid : int, in_port : int, out_port : int,
+        eth_type : int, ip_src_addr : str, ip_dst_addr : str,
+        priority : int = 65535,
+    ) -> Union[bool, Exception]:
+        # Request body for Ryu's ofctl_rest /stats/flowentry/add: match on the
+        # ingress port and IPv4 src/dst addresses, output to the egress port.
+        flow_entry = {
+            "dpid"    : dpid,
+            "priority": priority,
+            "match"   : {
+                "in_port" : in_port,
+                "eth_type": eth_type,
+                "ipv4_src": ip_src_addr,
+                "ipv4_dst": ip_dst_addr,
+            },
+            "instructions": [
+                {
+                    "type": "APPLY_ACTIONS",
+                    "actions": [
+                        {
+                            "max_len": 65535,
+                            "type": "OUTPUT",
+                            "port": out_port
+                        }
+                    ]
+                }
+            ]
+        }
+
+        LOGGER.debug("[add_flow_rule] flow_entry = {:s}".format(str(flow_entry)))
+
+        try:
+            response = requests.post(
+                self._add_flow_entry_url, json=flow_entry,
+                timeout=self._timeout, verify=False, auth=self._auth
+            )
+            response.raise_for_status()
+            LOGGER.info("Successfully posted flow entry: {:s}".format(str(flow_entry)))
+            return True
+        except requests.exceptions.Timeout as e:
+            MSG = "Timeout adding flow rule {:s} {:s}"
+            LOGGER.exception(MSG.format(str(self._add_flow_entry_url), str(flow_entry)))
+            return e
+        except requests.exceptions.RequestException as e:
+            MSG = "Error adding flow rule {:s} {:s}"
+            LOGGER.exception(MSG.format(str(self._add_flow_entry_url), str(flow_entry)))
+            return e
+
+    def del_flow_rule(
+        self, dpid : int, in_port : int, out_port : int,
+        eth_type : int, ip_src_addr : str, ip_dst_addr : str,
+        priority : int = 65535,
+    ) -> Union[bool, Exception]:
+        # delete_strict requires priority and match to be identical to the installed
+        # entry; table_id/cookie/cookie_mask are top-level fields of the flow entry,
+        # not OXM match fields.
+        flow_entry = {
+            "dpid"       : dpid,
+            "priority"   : priority,
+            "table_id"   : 0,
+            "cookie"     : 0,
+            "cookie_mask": 0,
+            "match"      : {
+                "in_port" : in_port,
+                "eth_type": eth_type,
+                "ipv4_src": ip_src_addr,
+                "ipv4_dst": ip_dst_addr,
+            },
+            "instructions": [
+                {
+                    "type":
"APPLY_ACTIONS", + "actions": [ + { + "max_len": 65535, + "type": "OUTPUT", + "port": out_port + } + ] + } + ] + } + + LOGGER.debug("[del_flow_rule] flow_entry = {:s}".format(str(flow_entry))) + + try: + response = requests.post( + self._del_flow_entry_url, json=flow_entry, + timeout=self._timeout, verify=False, auth=self._auth + ) + response.raise_for_status() + LOGGER.info("Successfully posted flow entry: {:s}".format(str(flow_entry))) + return True + except requests.exceptions.Timeout as e: + MSG = "Timeout deleting flow rule {:s} {:s}" + LOGGER.exception(MSG.format(str(self._del_flow_entry_url), str(flow_entry))) + return e + except requests.exceptions.RequestException as e: + MSG = "Error deleting flow rule {:s} {:s}" + LOGGER.exception(MSG.format(str(self._del_flow_entry_url), str(flow_entry))) + return e diff --git a/src/device/service/drivers/ryu/RyuDriver.py b/src/device/service/drivers/ryu/RyuDriver.py new file mode 100644 index 0000000000000000000000000000000000000000..c7adac3a5c1ca9fb9ed5b067b703940342e1b1d0 --- /dev/null +++ b/src/device/service/drivers/ryu/RyuDriver.py @@ -0,0 +1,174 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, threading +from typing import Any, List, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.type_checkers.Checkers import chk_string, chk_type +from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS +from device.service.drivers.ryu import ALL_RESOURCE_KEYS +from .RyuApiClient import RyuApiClient + +LOGGER = logging.getLogger(__name__) + +DRIVER_NAME = 'ryu' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) + +class RyuDriver(_Driver): + def __init__(self, address: str, port: int, **settings) -> None: + super().__init__(DRIVER_NAME, address, port, **settings) + self.__lock = threading.Lock() + self.__started = threading.Event() + self.__terminate = threading.Event() + self.rac = RyuApiClient( + self.address, int(self.port), + scheme = self.settings.get('scheme', 'http'), + username = self.settings.get('username'), + password = self.settings.get('password'), + timeout = self.settings.get('timeout', 30), + ) + + def Connect(self) -> bool: + with self.__lock: + connected = self.rac.check_credentials() + if connected: self.__started.set() + return connected + + def Disconnect(self) -> bool: + with self.__lock: + self.__terminate.set() + return True + + @metered_subclass_method(METRICS_POOL) + def GetInitialConfig(self) -> List[Tuple[str, Any]]: + with self.__lock: + return [] + + @metered_subclass_method(METRICS_POOL) + def GetConfig(self, resource_keys: List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]: + chk_type('resources', resource_keys, list) + results = [] + with self.__lock: + if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS + for i, resource_key in enumerate(resource_keys): + str_resource_name = 
'resource_key[#{:d}]'.format(i)
+                try:
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    if resource_key == RESOURCE_ENDPOINTS:
+                        results.extend(self.rac.get_devices_endpoints())
+                except Exception as e:
+                    LOGGER.exception('Unhandled error processing resource_key({:s})'.format(str(resource_key)))
+                    results.append((resource_key, e))
+            return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        results = []
+        if not resources: return results
+        with self.__lock:
+            for resource in resources:
+                try:
+                    resource_key, resource_value = resource
+                    if not resource_key.startswith("/device[") or "/flow[" not in resource_key:
+                        LOGGER.error(f"Invalid resource_key format: {resource_key}")
+                        results.append(Exception(f"Invalid resource_key format: {resource_key}"))
+                        continue
+
+                    try:
+                        resource_value_dict = json.loads(resource_value)
+                        LOGGER.debug('resource_value_dict = {:s}'.format(str(resource_value_dict)))
+                        dpid = int(resource_value_dict["dpid"], 16)
+                        # Port names follow the "<switch>-eth<N>" convention; the numeric
+                        # suffix after "eth" is the OpenFlow port number.
+                        in_port = int(resource_value_dict["in-port"].split("-")[1][3:])
+                        out_port = int(resource_value_dict["out-port"].split("-")[1][3:])
+                        ip_src_addr = resource_value_dict.get("ip_address_source", "")
+                        ip_dst_addr = resource_value_dict.get("ip_address_destination", "")
+
+                        # Priorities hard-coded for the host pairs of the test topology.
+                        if "h1-h3" in resource_key or "h3-h1" in resource_key:
+                            priority = 1000
+                        elif "h2-h4" in resource_key or "h4-h2" in resource_key:
+                            priority = 1500
+                        else:
+                            priority = 65535
+                    except (KeyError, ValueError, IndexError) as e:
+                        MSG = "Error processing resource {:s}"
+                        LOGGER.exception(MSG.format(str(resource)))
+                        results.append(e)
+                        continue
+
+                    results.append(self.rac.add_flow_rule(
+                        dpid, in_port, out_port, 0x0800, ip_src_addr, ip_dst_addr,
+                        priority=priority
+                    ))
+                except Exception as e:
+                    MSG = "Error processing resource {:s}"
+                    LOGGER.exception(MSG.format(str(resource)))
+                    results.append(e)
+        return results
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+        LOGGER.info("[DeleteConfig] resources = {:s}".format(str(resources)))
+
+        results = []
+        if not resources:
+            return results
+        with self.__lock:
+            for resource in resources:
+                try:
+                    resource_key, resource_value = resource
+
+                    if not resource_key.startswith("/device[") or "/flow[" not in resource_key:
+                        LOGGER.error(f"Invalid resource_key format: {resource_key}")
+                        results.append(Exception(f"Invalid resource_key format: {resource_key}"))
+                        continue
+
+                    try:
+                        resource_value_dict = json.loads(resource_value)
+                        LOGGER.debug('resource_value_dict = {:s}'.format(str(resource_value_dict)))
+                        dpid = int(resource_value_dict["dpid"], 16)
+                        in_port = int(resource_value_dict["in-port"].split("-")[1][3:])
+                        out_port = int(resource_value_dict["out-port"].split("-")[1][3:])
+                        ip_src_addr = resource_value_dict.get("ip_address_source", "")
+                        ip_dst_addr = resource_value_dict.get("ip_address_destination", "")
+
+                        # Priorities must match those used in SetConfig for delete_strict to succeed.
+                        if "h1-h3" in resource_key or "h3-h1" in resource_key:
+                            priority = 1000
+                        elif "h2-h4" in resource_key or "h4-h2" in resource_key:
+                            priority = 1500
+                        else:
+                            priority = 65535
+                    except (KeyError, ValueError, IndexError) as e:
+                        MSG = "Error processing resource {:s}"
+                        LOGGER.exception(MSG.format(str(resource)))
+                        results.append(e)
+                        continue
+
+                    results.append(self.rac.del_flow_rule(
+                        dpid, in_port, out_port, 0x0800, ip_src_addr, ip_dst_addr,
+
priority=priority + )) + except Exception as e: + MSG = "Error processing resource {:s}" + LOGGER.exception(MSG.format(str(resource))) + results.append(e) + + return results diff --git a/src/device/service/drivers/ryu/__init__.py b/src/device/service/drivers/ryu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba812804f72023551f3b69b6afa58851fa178c2b --- /dev/null +++ b/src/device/service/drivers/ryu/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from device.service.driver_api._Driver import RESOURCE_ENDPOINTS + +ALL_RESOURCE_KEYS = [ + RESOURCE_ENDPOINTS, +] diff --git a/src/nbi/service/ietf_l3vpn/Handlers.py b/src/nbi/service/ietf_l3vpn/Handlers.py index 0e8b8013ef5cba1305cdd040ea68efc653eefa5e..8078d9d49afb9de5880d159258003f0db3acfa3e 100644 --- a/src/nbi/service/ietf_l3vpn/Handlers.py +++ b/src/nbi/service/ietf_l3vpn/Handlers.py @@ -133,7 +133,7 @@ def process_site_network_access( MSG = 'Site Network Access IPv4 Allocation Type: {:s}' raise NotImplementedError(MSG.format(str(ipv4_allocation['address-allocation-type']))) ipv4_allocation_addresses = ipv4_allocation['addresses'] - ipv4_provider_address = ipv4_allocation_addresses['provider-address'] + ipv4_provider_address = ipv4_allocation_addresses.get('provider-address') ipv4_customer_address = ipv4_allocation_addresses['customer-address'] ipv4_prefix_length = ipv4_allocation_addresses['prefix-length' ] diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index db0c552487363e4bb283832c7e40a4e7623e994c..73a66077af2555b2c4d9be08b0bb124df9a5de92 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -70,11 +70,6 @@ def get_device_controller_uuid( ) -> Optional[str]: controller_uuid = device.controller_id.device_uuid.uuid if len(controller_uuid) > 0: return controller_uuid - #for config_rule in device.device_config.config_rules: - # if config_rule.WhichOneof('config_rule') != 'custom': continue - # if config_rule.custom.resource_key != '_controller': continue - # device_controller_id = json.loads(config_rule.custom.resource_value) - # return device_controller_id['uuid'] return None def _map_device_type(device : Device) -> DeviceTypeEnum: diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 170f34a61742a03db7e74c7b9ab0e8614c738058..431a04d977285f691830173f602407c52c63c402 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -52,6 +52,7 @@ DEVICE_DRIVER_VALUES = { DeviceDriverEnum.DEVICEDRIVER_QKD, DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN, DeviceDriverEnum.DEVICEDRIVER_SMARTNIC, + DeviceDriverEnum.DEVICEDRIVER_RYU, } # Map allowed filter fields to allowed values 
per Filter field. If no restriction (free text) None is specified diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 7c00d5a850c8484db5cc83e17f2272dc678a419b..c0cf24895843686467e040112d8f657279fc9887 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -35,6 +35,7 @@ from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler from .optical_tfs.OpticalTfsServiceHandler import OpticalTfsServiceHandler from .oc.OCServiceHandler import OCServiceHandler from .qkd.qkd_service_handler import QKDServiceHandler +from .l3nm_ryu.L3NMRyuServiceHandler import L3NMRyuServiceHandler SERVICE_HANDLERS = [ (L2NMEmulatedServiceHandler, [ @@ -106,7 +107,10 @@ SERVICE_HANDLERS = [ (MicrowaveServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, - FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532], + FilterFieldEnum.DEVICE_DRIVER : [ + DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, + DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532 + ], } ]), (P4DummyL1ServiceHandler, [ @@ -168,5 +172,11 @@ SERVICE_HANDLERS = [ FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_QKD, FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_QKD], } + ]), + (L3NMRyuServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, + FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_RYU], + } ]) ] diff --git a/src/service/service/service_handlers/l3nm_ryu/L3NMRyuServiceHandler.py b/src/service/service/service_handlers/l3nm_ryu/L3NMRyuServiceHandler.py new file mode 100644 index 0000000000000000000000000000000000000000..426c14d31337a3b5df0cacd5d7f5794d974f0d50 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ryu/L3NMRyuServiceHandler.py @@ -0,0 +1,272 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
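+
+# Service handler that maps an L3NM service onto OpenFlow flow rules and pushes
+# them, as device config rules, to the Ryu controller managing the switches in
+# the path. It assumes numeric DPID device names and Mininet-style
+# "<switch>-eth<N>" endpoint names.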
+
+import json, logging, re
+from typing import List, Optional, Tuple, Union
+from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method
+from common.proto.context_pb2 import ConfigRule, Device, DeviceId, EndPoint, Service
+from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
+from common.tools.object_factory.Device import json_device_id
+from common.type_checkers.Checkers import chk_type
+from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching
+from service.service.service_handler_api._ServiceHandler import _ServiceHandler
+from service.service.service_handler_api.SettingsHandler import SettingsHandler
+from service.service.task_scheduler.TaskExecutor import TaskExecutor
+
+LOGGER = logging.getLogger(__name__)
+
+METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l3nm_ryu'})
+
+class L3NMRyuServiceHandler(_ServiceHandler):
+    def __init__(   # pylint: disable=super-init-not-called
+        self, service : Service, task_executor : TaskExecutor, **settings
+    ) -> None:
+        self.__service = service
+        self.__task_executor = task_executor
+        self.__settings_handler = SettingsHandler(service.service_config, **settings)
+
+    def _get_endpoint_details(
+        self, endpoint : Tuple[str, str, Optional[str]]
+    ) -> Tuple[Device, EndPoint]:
+        device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint)
+        device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid)))
+        endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid)
+        return device_obj, endpoint_obj
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        LOGGER.debug('endpoints = {:s}'.format(str(endpoints)))
+        chk_type('endpoints', endpoints, list)
+
+        if len(endpoints) < 2:
+            LOGGER.warning('nothing done: not enough endpoints')
+            return []
+
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        service_name = self.__service.name
+        service_configuration_rules = self.__service.service_config.config_rules
+        LOGGER.debug('service_configuration_rules = {:s}'.format(str(service_configuration_rules)))
+        ip_addresses = []
+        flow_rules = []
+
+        for rule in service_configuration_rules:
+            try:
+                custom_field = rule.custom
+                resource_value_str = custom_field.resource_value
+                resource_value = json.loads(resource_value_str)
+                resource_key_str = custom_field.resource_key
+                LOGGER.debug(f"resource_key_str = {resource_key_str}")
+                match = re.search(r"/device\[(.*?)\]/", resource_key_str)
+                if match:
+                    device_name = match.group(1)
+                    flow_rules.append(device_name)
+                ip_address = resource_value.get("ip_address")
+                if ip_address is not None:
+                    ip_addresses.append(ip_address)
+            except Exception:
+                LOGGER.exception('Error processing config rule')
+
+        LOGGER.debug('ip_addresses = {:s}'.format(str(ip_addresses)))
+        LOGGER.debug('flow_rules = {:s}'.format(str(flow_rules)))
+        if len(flow_rules) < 2:
+            LOGGER.warning('Not enough devices to construct flow rules')
+            return []
+        if len(ip_addresses) < 2:
+            LOGGER.warning('Not enough IP addresses found')
+            return []
+
+        results = []
+        try:
+            src_device, src_endpoint = self._get_endpoint_details(endpoints[0])
+            dst_device, dst_endpoint = self._get_endpoint_details(endpoints[-1])
+            src_controller = self.__task_executor.get_device_controller(src_device)
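+            # Start from an empty rule list so only this service's flow rules are pushed to the controller.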
+            del src_controller.device_config.config_rules[:]
+
+            for index in range(len(endpoints) - 1):
+                current_device, current_endpoint = self._get_endpoint_details(endpoints[index])
+                next_device, next_endpoint = self._get_endpoint_details(endpoints[index + 1])
+                if current_device.name == next_device.name:
+                    in_port_forward = current_endpoint.name
+                    out_port_forward = next_endpoint.name
+                    dpid_src = int(current_device.name)
+                    LOGGER.debug(f"DPID source: {dpid_src}")
+                    dpid_dst = int(next_device.name)
+                    LOGGER.debug(f"DPID destination: {dpid_dst}")
+                    flow_rule_forward = f"{flow_rules[0]}-{flow_rules[1]}"
+                    flow_rule_reverse = f"{flow_rules[1]}-{flow_rules[0]}"
+                    ip_address_source = ip_addresses[0]
+                    ip_address_destination = ip_addresses[1]
+                    forward_resource_value = {
+                        "dpid": current_device.name,
+                        "in-port": in_port_forward,
+                        "out-port": out_port_forward,
+                        "ip_address_source": ip_address_source,
+                        "ip_address_destination": ip_address_destination,
+                    }
+                    forward_rule = json_config_rule_set(
+                        resource_key=f"/device[{current_endpoint.name.split('-')[0]}]/flow[{flow_rule_forward}]",
+                        resource_value=forward_resource_value
+                    )
+                    LOGGER.debug(f"Forward configuration rule: {forward_rule}")
+                    src_controller.device_config.config_rules.append(ConfigRule(**forward_rule))
+                    in_port_reverse = next_endpoint.name
+                    out_port_reverse = current_endpoint.name
+                    reverse_resource_value = {
+                        "dpid": current_device.name,
+                        "in-port": in_port_reverse,
+                        "out-port": out_port_reverse,
+                        "ip_address_source": ip_address_destination,
+                        "ip_address_destination": ip_address_source,
+                    }
+                    reverse_rule = json_config_rule_set(
+                        resource_key=f"/device[{current_endpoint.name.split('-')[0]}]/flow[{flow_rule_reverse}]",
+                        resource_value=reverse_resource_value
+                    )
+                    LOGGER.debug(f"Reverse configuration rule: {reverse_rule}")
+                    src_controller.device_config.config_rules.append(ConfigRule(**reverse_rule))
+                    self.__task_executor.configure_device(src_controller)
+                    results.append(True)
+
+            def get_config_rules(controller):
+                try:
+                    config_rules = controller.device_config.config_rules
+                    for rule in config_rules:
+                        if rule.HasField("custom"):
+                            resource_key = rule.custom.resource_key
+                            resource_value = rule.custom.resource_value
+                            LOGGER.debug(f"Resource key in config: {resource_key}, Resource value in config: {resource_value}")
+                except Exception:
+                    LOGGER.exception("Error in Configuration Rules")
+            get_config_rules(src_controller)
+            LOGGER.debug(f"Configuration rules: {src_controller.device_config.config_rules}")
+            return results
+
+        except Exception as e:
+            LOGGER.exception("Error in SetEndpoint")
+            return [e]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteEndpoint(
+        self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None
+    ) -> List[Union[bool, Exception]]:
+        LOGGER.debug('endpoints_delete = {:s}'.format(str(endpoints)))
+        chk_type('endpoints', endpoints, list)
+        if len(endpoints) < 2:
+            LOGGER.warning('nothing done: not enough endpoints')
+            return []
+        service_uuid = self.__service.service_id.service_uuid.uuid
+        service_name = self.__service.name
+        service_configuration_rules = self.__service.service_config.config_rules
+        LOGGER.debug('service_configuration_rules = {:s}'.format(str(service_configuration_rules)))
+        ip_addresses = []
+        flow_rules = []
+        for rule in service_configuration_rules:
+            try:
+                custom_field = rule.custom
+                resource_value_str = custom_field.resource_value
+                resource_value = json.loads(resource_value_str)
+                resource_key_str = custom_field.resource_key
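+                # Extract the switch name from the rule key, e.g. "/device[<switch>]/flow[...]".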
LOGGER.debug(f"resource_key_str = {resource_key_str}") + match = re.search(r"/device\[(.*?)\]/", resource_key_str) + if match: + device_name = match.group(1) + else: + device_name = None + flow_rules.append(device_name) + ip_address = resource_value.get("ip_address") + ip_addresses.append(ip_address) + + except Exception as e: + LOGGER.exception("Error in Rules") + LOGGER.debug('ip_address = {:s}'.format(str(ip_addresses))) + LOGGER.debug('flow_rules = {:s}'.format(str(flow_rules))) + results = [] + try: + src_device, src_endpoint, = self._get_endpoint_details(endpoints[0]) + dst_device, dst_endpoint, = self._get_endpoint_details(endpoints[-1]) + src_controller = self.__task_executor.get_device_controller(src_device) + del src_controller.device_config.config_rules[:] + for index in range(len(endpoints) - 1): + current_device, current_endpoint = self._get_endpoint_details(endpoints[index]) + next_device, next_endpoint = self._get_endpoint_details(endpoints[index + 1]) + if current_device.name == next_device.name: + in_port_forward = current_endpoint.name + out_port_forward = next_endpoint.name + dpid_src = int(current_device.name) + LOGGER.debug(f"DPID source: {dpid_src}") + dpid_dst = int(next_device.name) + LOGGER.debug(f"DPID destination: {dpid_dst}") + flow_rule_forward = f"{flow_rules[0]}-{flow_rules[1]}" + flow_rule_reverse = f"{flow_rules[1]}-{flow_rules[0]}" + ip_address_source = ip_addresses[0] + ip_address_destination = ip_addresses[1] + + forward_resource_value = ({"dpid": current_device.name, + "in-port": in_port_forward, + "out-port": out_port_forward, + "ip_address_source": ip_address_source, + "ip_address_destination": ip_address_destination, + }) + forward_rule = json_config_rule_delete ( + resource_key=f"/device[{current_endpoint.name.split('-')[0]}]/flow[{flow_rule_forward}]", + resource_value=forward_resource_value + ) + + LOGGER.debug(f"Forward configuration rule: {forward_rule}") + in_port_reverse = next_endpoint.name + out_port_reverse = current_endpoint.name + reverse_resource_value = { + "dpid": current_device.name, + "in-port": in_port_reverse, + "out-port": out_port_reverse, + "ip_address_source": ip_address_destination, + "ip_address_destination": ip_address_source, + } + reverse_rule = json_config_rule_delete( + resource_key=f"/device[{current_endpoint.name.split('-')[0]}]/flow[{flow_rule_reverse}]", + resource_value=reverse_resource_value + ) + LOGGER.debug(f"Reverse configuration rule: {reverse_rule}") + src_controller.device_config.config_rules.append(ConfigRule(**reverse_rule)) + src_controller.device_config.config_rules.append(ConfigRule(**forward_rule)) + + json_config_rule_delete_1 = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), { + 'uuid': service_uuid + }) + src_controller.device_config.config_rules.append(ConfigRule(**json_config_rule_delete_1)) + self.__task_executor.configure_device(src_controller) + results.append(True) + + def get_config_rules(controller): + try: + config_rules = controller.device_config.config_rules + for rule in config_rules: + if rule.HasField("custom"): + resource_key = rule.custom.resource_key + resource_value = rule.custom.resource_value + LOGGER.debug(f"Resource key in config: {resource_key}, Resource value in config: {resource_value}") + except Exception as e: + print(f"Error accessing config rules: {e}") + + get_config_rules(src_controller) + LOGGER.debug(f"Configuration rules: {src_controller.device_config.config_rules}") + return results + + except Exception as e: + LOGGER.exception(f"Error in 
DeleteEndpoint") + return [e] \ No newline at end of file diff --git a/src/service/service/service_handlers/l3nm_ryu/__init__.py b/src/service/service/service_handlers/l3nm_ryu/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14 --- /dev/null +++ b/src/service/service/service_handlers/l3nm_ryu/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index 8f797882f86f4d53c61da22964909ab1a808ba53..f82ff2a8581e7a9f55a0427e446720d5b6095240 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -52,6 +52,22 @@ class CacheableObjectType(Enum): SERVICE = 'service' QKD_APP = 'qkd-app' +CONTROLLER_DEVICE_TYPES = { + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER, + DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM, + DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM, + DeviceTypeEnum.IETF_SLICE, + DeviceTypeEnum.IP_SDN_CONTROLLER, + DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM, + DeviceTypeEnum.OPEN_LINE_SYSTEM, + DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, +} +EXPANSION_CONTROLLER_DEVICE_TYPES = { + DeviceTypeEnum.IETF_SLICE, + DeviceTypeEnum.OPENFLOW_RYU_CONTROLLER, +} + class TaskExecutor: def __init__(self, service_handler_factory : ServiceHandlerFactory) -> None: self._service_handler_factory = service_handler_factory @@ -255,16 +271,6 @@ class TaskExecutor: return service_handler_class.check_media_channel(connection_uuid) def get_device_controller(self, device : Device) -> Optional[Device]: - #json_controller = None - #for config_rule in device.device_config.config_rules: - # if config_rule.WhichOneof('config_rule') != 'custom': continue - # if config_rule.custom.resource_key != '_controller': continue - # json_controller = json.loads(config_rule.custom.resource_value) - # break - - #if json_controller is None: return None - - #controller_uuid = json_controller['uuid'] controller_uuid = device.controller_id.device_uuid.uuid if len(controller_uuid) == 0: return None controller = self.get_device(DeviceId(**json_device_id(controller_uuid))) @@ -276,6 +282,7 @@ class TaskExecutor: self, connection : Connection, exclude_managed_by_controller : bool = False ) -> Dict[DeviceTypeEnum, Dict[str, Device]]: devices : Dict[DeviceTypeEnum, Dict[str, Device]] = dict() + controllers : Dict[DeviceTypeEnum, Dict[str, Device]] = dict() for endpoint_id in connection.path_hops_endpoint_ids: device = self.get_device(endpoint_id.device_id) device_uuid = endpoint_id.device_id.device_uuid.uuid @@ -284,18 +291,39 @@ class TaskExecutor: controller = self.get_device_controller(device) if controller is None: device_type = DeviceTypeEnum._value2member_map_[device.device_type] - devices.setdefault(device_type, dict())[device_uuid] = device + if device_type in 
CONTROLLER_DEVICE_TYPES: + controllers.setdefault(device_type, dict())[device_uuid] = device + else: + devices.setdefault(device_type, dict())[device_uuid] = device else: + # ===== Ryu original test ======================================================================== + #if not exclude_managed_by_controller: + # LOGGER.debug('device managed by controller = {:s}'.format(str(device_uuid))) + # device_type = DeviceTypeEnum._value2member_map_[controller.device_type] + # LOGGER.debug('device_type not exlude by controller = {:s}'.format(str(device_type))) + # devices.setdefault(device_type, dict())[device_uuid] = device + #else: + # device_type = DeviceTypeEnum._value2member_map_[controller.device_type] + # LOGGER.debug('device_type = {:s}'.format(str(device_type))) + # devices.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller + # ================================================================================================ + if not exclude_managed_by_controller: - controller_device_type_enum = DeviceTypeEnum._value2member_map_[controller.device_type] - if controller_device_type_enum == DeviceTypeEnum.IETF_SLICE: - devices.setdefault(controller_device_type_enum, dict())[device_uuid] = device - else: + # Controller device types for those underlying path is needed by service handler + device_type = DeviceTypeEnum._value2member_map_[controller.device_type] + if device_type not in EXPANSION_CONTROLLER_DEVICE_TYPES: device_type = DeviceTypeEnum._value2member_map_[device.device_type] - devices.setdefault(device_type, dict())[device_uuid] = device + devices.setdefault(device_type, dict())[device_uuid] = device + device_type = DeviceTypeEnum._value2member_map_[controller.device_type] - devices.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller - return devices + controllers.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller + + LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) + LOGGER.debug('[get_devices_from_connection] controllers = {:s}'.format(str(controllers))) + if len(devices) == 0 and len(controllers) > 0: + return controllers + else: + return devices # ----- Service-related methods ------------------------------------------------------------------------------------ @@ -324,14 +352,41 @@ class TaskExecutor: def get_service_handlers( self, connection : Connection, service : Service, **service_handler_settings ) -> Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]]: + # ===== Ryu original test ======================================================================== + ## WARNING: exclude_managed_by_controller should be True, changed to False for test purposes. + ## For Ryu SDN controller we need to know the underlying devices we are traversing. + ## Elaborate proper logic to resolve this case. 
+ #connection_device_types_excluded : Dict[DeviceTypeEnum, Dict[str, Device]] = self.get_devices_from_connection( + # connection, exclude_managed_by_controller=True + #) + #LOGGER.debug('connection_device_types_excluded = {:s}'.format(str(connection_device_types_excluded))) + #connection_device_types_included : Dict[DeviceTypeEnum, Dict[str, Device]] = self.get_devices_from_connection( + # connection, exclude_managed_by_controller=False + #) + #LOGGER.debug('connection_device_types_included = {:s}'.format(str(connection_device_types_included))) + # ================================================================================================ + connection_device_types : Dict[DeviceTypeEnum, Dict[str, Device]] = self.get_devices_from_connection( connection, exclude_managed_by_controller=False ) service_handlers : Dict[DeviceTypeEnum, Tuple['_ServiceHandler', Dict[str, Device]]] = dict() + # ===== Ryu original test ======================================================================== + #for device_type, connection_devices in connection_device_types_excluded.items(): + # ================================================================================================ for device_type, connection_devices in connection_device_types.items(): try: service_handler_class = get_service_handler_class( - self._service_handler_factory, service, connection_devices) + self._service_handler_factory, service, connection_devices + ) + # ===== Ryu original test ======================================================================== + #LOGGER.debug('service_handler_class IN CONNECTION DEVICE TYPE EXCLUDED = {:s}'.format(str(service_handler_class.__name__))) + #service_handler = service_handler_class(service, self, **service_handler_settings) + #LOGGER.debug('service_handler IN CONNECTION DEVICE TYPE EXCLUDED = {:s}'.format(str(service_handler))) + #connection_devices_included = connection_device_types_included.get(device_type, connection_devices) + #LOGGER.debug('connection_devices_included IN CONNECTION DEVICE TYPE EXCLUDED = {:s}'.format(str(connection_devices_included))) + #service_handlers[device_type] = (service_handler, connection_devices_included) + #LOGGER.debug('service_handlers IN CONNECTION DEVICE TYPE EXCLUDED = {:s}'.format(str(service_handlers))) + # ================================================================================================ service_handler = service_handler_class(service, self, **service_handler_settings) service_handlers[device_type] = (service_handler, connection_devices) except ( diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 9c441d746451b4b35718a74c1c4ceb76dad7598e..b80bf4558630b0777156db2bcaa5fe62cead4839 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -24,5 +24,6 @@ include: #- local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' #- local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' #- local: '/src/tests/ofc25/.gitlab-ci.yml' + #- local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' diff --git a/src/tests/ryu-openflow/.gitignore b/src/tests/ryu-openflow/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/tests/ryu-openflow/.gitlab-ci.yml b/src/tests/ryu-openflow/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..4f95d466172b38a891fa1fd000c173e49a59c6c0 --- /dev/null +++ b/src/tests/ryu-openflow/.gitlab-ci.yml @@ 
-0,0 +1,278 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build ryu-openflow: + variables: + TEST_NAME: 'ryu-openflow' + IMAGE_TAG: 'mr$CI_MERGE_REQUEST_IID' + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - docker ps -aq | xargs -r docker rm -f + - containerlab destroy --all --cleanup || true + script: + - echo "CI_PIPELINE_SOURCE = $CI_PIPELINE_SOURCE" + - echo "CI_MERGE_REQUEST_ID = $CI_MERGE_REQUEST_ID" + - echo "CI_MERGE_REQUEST_IID = $CI_MERGE_REQUEST_IID" + - docker buildx build -t "$CI_REGISTRY_IMAGE/${TEST_NAME}-ryu:${IMAGE_TAG}" -f ./src/tests/${TEST_NAME}/Ryu.Dockerfile . + - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}-ryu:${IMAGE_TAG}" + - docker buildx build -t "$CI_REGISTRY_IMAGE/${TEST_NAME}-test:${IMAGE_TAG}" -f ./src/tests/${TEST_NAME}/Test.Dockerfile . + - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}-test:${IMAGE_TAG}" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml} + - src/tests/${TEST_NAME}/Dockerfile + - .gitlab-ci.yml + + +# - docker rm -f na-t1 na-t2 na-r1 na-r2 +# - docker network rm -f na-br +# +# - > +# docker network create -d bridge --subnet=172.254.253.0/24 --gateway=172.254.253.254 +# --ip-range=172.254.253.0/24 na-br +# - > +# docker run -dit --init --name na-t1 --network=na-br --ip 172.254.253.101 +# --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/startNetconfAgent-tp.sh:/confd/examples.confd/OC23/startNetconfAgent.sh" +# --volume "$PWD/src/tests/${TEST_NAME}/node-agents-config/platform_t1.xml:/confd/examples.confd/OC23/platform.xml" +# asgamb1/oc23bgp.img:latest /confd/examples.confd/OC23/startNetconfAgent.sh + + +## Deploy TeraFlowSDN and Execute end-2-end test +#end2end_test ryu-openflow: +# timeout: 90m +# variables: +# TEST_NAME: 'ryu-openflow' +# stage: end2end_test +# # Disable to force running it after all other tasks +# #needs: +# # - build ryu-openflow +# before_script: +# - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY +# - docker ps -aq | xargs -r docker rm -f +# - containerlab destroy --all --cleanup || true +# script: +# # Download Docker image to run the test +# - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest" +# +# # Check MicroK8s is ready +# - microk8s status --wait-ready +# - kubectl get pods --all-namespaces +# +# # Deploy ContainerLab Scenario +# - RUNNER_PATH=`pwd` +# #- cd $PWD/src/tests/${TEST_NAME} +# - mkdir -p /tmp/clab/${TEST_NAME} +# - cp -R src/tests/${TEST_NAME}/clab/* 
/tmp/clab/${TEST_NAME} +# - tree -la /tmp/clab/${TEST_NAME} +# - cd /tmp/clab/${TEST_NAME} +# - containerlab deploy --reconfigure --topo ryu-openflow.clab.yml +# - cd $RUNNER_PATH +# +# # Wait for initialization of Device NOSes +# - sleep 3 +# - docker ps -a +# +# # Dump configuration of the routers (before any configuration) +# - containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Configure TeraFlowSDN deployment +# # Uncomment if DEBUG log level is needed for the components +# #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml +# #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml +# #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml +# #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml +# #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml +# #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/monitoringservice.yaml +# +# - source src/tests/${TEST_NAME}/deploy_specs.sh +# #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}" +# #- export TFS_SKIP_BUILD="YES" +# #- export TFS_IMAGE_TAG="latest" +# #- echo "TFS_REGISTRY_IMAGES=${CI_REGISTRY_IMAGE}" +# +# # Deploy TeraFlowSDN +# - ./deploy/crdb.sh +# - ./deploy/nats.sh +# - ./deploy/kafka.sh +# - ./deploy/qdb.sh +# - ./deploy/tfs.sh +# - ./deploy/show.sh +# +# ## Wait for Context to be subscribed to NATS +# ## WARNING: this loop is infinite if there is no subscriber (such as monitoring). +# ## Investigate if we can use a counter to limit the number of iterations. +# ## For now, keep it commented out. +# #- LOOP_MAX_ATTEMPTS=180 +# #- LOOP_COUNTER=0 +# #- > +# # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do +# # echo "Attempt: $LOOP_COUNTER" +# # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1; +# # sleep 1; +# # LOOP_COUNTER=$((LOOP_COUNTER + 1)) +# # if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then +# # echo "Max attempts reached, exiting the loop." 
+# # break +# # fi +# # done +# - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server +# +# # Run end-to-end test: onboard scenario +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh +# +# # Run end-to-end test: configure service TFS +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh +# +# # Dump configuration of the routers (after configure TFS service) +# - containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Run end-to-end test: test connectivity with ping +# - export TEST1_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) +# - echo $TEST1_10 +# - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST1_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) +# - echo $TEST1_1 +# - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST2_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) +# - echo $TEST2_1 +# - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST2_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) +# - echo $TEST2_10 +# - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST3_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) +# - echo $TEST3_1 +# - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' +# - export TEST3_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) +# - echo $TEST3_10 +# - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' +# +# # Run end-to-end test: deconfigure service TFS +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh +# +# # Dump configuration of the routers (after deconfigure TFS service) +# - containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Run end-to-end test: configure service IETF +# 
- > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh +# +# # Dump configuration of the routers (after configure IETF service) +# - containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Run end-to-end test: test connectivity with ping +# - export TEST1_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) +# - echo $TEST1_10 +# - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST1_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) +# - echo $TEST1_1 +# - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST2_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) +# - echo $TEST2_1 +# - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST2_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) +# - echo $TEST2_10 +# - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' +# - export TEST3_1=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) +# - echo $TEST3_1 +# - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' +# - export TEST3_10=$(containerlab exec --name ryu-openflow --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) +# - echo $TEST3_10 +# - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' +# +# # Run end-to-end test: deconfigure service IETF +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh +# +# # Dump configuration of the routers (after deconfigure IETF service) +# - containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Run end-to-end test: cleanup scenario +# - > +# docker run -t --rm --name ${TEST_NAME} --network=host +# --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" +# --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" +# $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-cleanup.sh +# +# after_script: +# # Dump configuration of the routers (on after_script) +# - containerlab exec --name ryu-openflow --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - 
containerlab exec --name ryu-openflow --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# - containerlab exec --name ryu-openflow --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" +# +# # Dump TeraFlowSDN component logs +# - source src/tests/${TEST_NAME}/deploy_specs.sh +# - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server +# - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server +# - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend +# - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server +# - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server +# #- kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/monitoringservice -c server +# +# # Destroy Scenario +# - docker rm -f ${TEST_NAME} || true +# - RUNNER_PATH=`pwd` +# #- cd $PWD/src/tests/${TEST_NAME} +# - cd /tmp/clab/${TEST_NAME} +# - containerlab destroy --topo ryu-openflow.clab.yml --cleanup || true +# - sudo rm -rf clab-ryu-openflow/ .ryu-openflow.clab.yml.bak || true +# - cd $RUNNER_PATH +# - kubectl delete namespaces tfs || true +# +# # Clean old docker images +# - docker images --filter="dangling=true" --quiet | xargs -r docker rmi +# +# #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' +# rules: +# - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' +# - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' +# artifacts: +# when: always +# reports: +# junit: ./src/tests/${TEST_NAME}/report_*.xml diff --git a/src/tests/ryu-openflow/Mininet.Dockerfile b/src/tests/ryu-openflow/Mininet.Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..97927e9e28d6962abc7ee9e5ec41bf9ccb1d8d10 --- /dev/null +++ b/src/tests/ryu-openflow/Mininet.Dockerfile @@ -0,0 +1,27 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
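+#
+# NOTE: This image bundles Open vSwitch and Mininet so the emulated dataplane can run in a
+# container. custom_pentagon_topology.py (copied below) points the switches at a Ryu
+# controller on 127.0.0.1, so the container is presumably meant to run with host networking;
+# only the Ryu service is declared in docker-compose.yml.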
+
+FROM ubuntu:22.04
+
+ENV DEBIAN_FRONTEND=noninteractive
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends iproute2 net-tools openvswitch-switch ca-certificates && \
+    apt-get install -y --no-install-recommends mininet=2.3.0-1ubuntu1 && \
+    apt-get autoremove -y && \
+    apt-get clean && \
+    rm -rf /var/lib/apt/lists/*
+
+COPY custom_pentagon_topology.py /opt/custom_pentagon_topology.py
+
+CMD ["python3", "/opt/custom_pentagon_topology.py"]
diff --git a/src/tests/ryu-openflow/README.md b/src/tests/ryu-openflow/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e051c8f1cd9beef623f4179c8185b0be44b1e0c6
--- /dev/null
+++ b/src/tests/ryu-openflow/README.md
@@ -0,0 +1,41 @@
+# Control of OpenFlow domain through Ryu SDN controller and TeraFlowSDN
+
+## TeraFlowSDN Deployment
+```bash
+cd ~/tfs-ctrl
+source ~/tfs-ctrl/src/tests/ryu-openflow/deploy_specs.sh
+./deploy/all.sh
+```
+
+## Download and install Mininet
+```bash
+sudo apt-get install "mininet=2.3.0-1ubuntu1"
+```
+
+## Deploy SDN controller and dataplane
+```bash
+cd ~/tfs-ctrl/src/tests/ryu-openflow/
+docker compose build    # or: docker buildx build --no-cache -t "ryu-image:dev" -f ./Ryu.Dockerfile .
+docker compose up -d    # or: docker run -d -p 6653:6653 -p 8080:8080 ryu-image:dev
+sudo python3 custom_pentagon_topology.py
+```
+
+## Destroy scenario
+```bash
+cd ~/tfs-ctrl/src/tests/ryu-openflow/
+docker compose down
+# Stop the Mininet dataplane with Ctrl+C (or "exit") in its terminal
+```
+
+## Onboard scenario
+- Through the TFS WebUI, loading descriptor file `data/tfs-topology.json`
+
+## Request connectivity service
+```bash
+cd ~/tfs-ctrl/src/tests/ryu-openflow/
+curl -X POST \
+    --header "Content-Type: application/json" \
+    --data @data/ietf-l3vpn-service.json \
+    --user "admin:admin" \
+    http://127.0.0.1/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services
+```
diff --git a/src/tests/ryu-openflow/Ryu.Dockerfile b/src/tests/ryu-openflow/Ryu.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..ad162d8afe3fc605c8e14e65055e8aa04aa5b482
--- /dev/null
+++ b/src/tests/ryu-openflow/Ryu.Dockerfile
@@ -0,0 +1,29 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM python:3.9-slim
+
+RUN apt-get update && apt-get install -y --no-install-recommends git iproute2 && rm -rf /var/lib/apt/lists/*
+
+RUN pip install --no-cache-dir --upgrade pip
+# NOTE: Ryu 4.34 expects eventlet.wsgi.ALREADY_HANDLED, which disappears in Eventlet ≥ 0.30.3.
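+# Installing Ryu from the faucetsdn Git tag v4.34 (its final release) together with the
+# eventlet pin below keeps ryu-manager and its REST applications importable on Python 3.9.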
+RUN pip install --no-cache-dir "setuptools" "wheel" "git+https://github.com/faucetsdn/ryu.git@v4.34" "eventlet<0.30.3" + +#COPY apps/ /opt/ryu-apps/ # Copy Ryu Apps, if any +WORKDIR /opt/ryu-apps + +# --- OpenFlow & Ryu REST API ports --- +EXPOSE 6653/tcp 8080/tcp + +CMD ["ryu-manager", "--verbose", "--observe-links", "ryu.app.ofctl_rest", "ryu.app.gui_topology.gui_topology"] diff --git a/src/tests/ryu-openflow/Test.Dockerfile b/src/tests/ryu-openflow/Test.Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..666efc1433bb2e5bc36fa1fb05575707602213ac --- /dev/null +++ b/src/tests/ryu-openflow/Test.Dockerfile @@ -0,0 +1,86 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/tests/ryu-openflow +WORKDIR /var/teraflow/tests/ryu-openflow +COPY src/tests/ryu-openflow/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/device/__init__.py device/__init__.py +COPY src/device/client/. device/client/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. monitoring/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. 
service/client/ +COPY src/slice/__init__.py slice/__init__.py +COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ +COPY src/tests/*.py ./tests/ +COPY src/tests/ryu-openflow/__init__.py ./tests/ryu-openflow/__init__.py +COPY src/tests/ryu-openflow/data/. ./tests/ryu-openflow/data/ +COPY src/tests/ryu-openflow/tests/. ./tests/ryu-openflow/tests/ +COPY src/tests/ryu-openflow/scripts/. ./ + +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install tree && \ + rm -rf /var/lib/apt/lists/* + +RUN tree -la /var/teraflow diff --git a/src/tests/ryu-openflow/__init__.py b/src/tests/ryu-openflow/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14 --- /dev/null +++ b/src/tests/ryu-openflow/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ryu-openflow/custom_pentagon_topology.py b/src/tests/ryu-openflow/custom_pentagon_topology.py new file mode 100644 index 0000000000000000000000000000000000000000..9d581e40babdb091b8d90865e71618ffeb7f663e --- /dev/null +++ b/src/tests/ryu-openflow/custom_pentagon_topology.py @@ -0,0 +1,55 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
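+#
+# Topology sketch: switches s1..s5 form a pentagon ring; hosts h1/h2 attach to s2 and
+# hosts h3/h4 attach to s5. Static ARP entries are installed right after start-up, so
+# connectivity tests do not depend on the controller flooding ARP requests.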
+ + +from mininet.topo import Topo +from mininet.net import Mininet +from mininet.node import RemoteController +from mininet.cli import CLI +from mininet.link import TCLink + +class PentagonTopo(Topo): + def build(self): + sw1 = self.addSwitch('s1') + sw2 = self.addSwitch('s2') + sw3 = self.addSwitch('s3') + sw4 = self.addSwitch('s4') + sw5 = self.addSwitch('s5') + + h1 = self.addHost('h1', ip='10.0.0.1', mac='00:00:00:00:00:01') + h2 = self.addHost('h2', ip='10.0.0.2', mac='00:00:00:00:00:02') + h3 = self.addHost('h3', ip='10.0.0.3', mac='00:00:00:00:00:03') + h4 = self.addHost('h4', ip='10.0.0.4', mac='00:00:00:00:00:04') + + self.addLink(sw1, sw2) + self.addLink(sw2, sw3) + self.addLink(sw3, sw4) + self.addLink(sw4, sw5) + self.addLink(sw5, sw1) + + self.addLink(h1, sw2) + self.addLink(h2, sw2) + self.addLink(h3, sw5) + self.addLink(h4, sw5) + +if __name__ == '__main__': + topo = PentagonTopo() + net = Mininet(topo=topo, controller=lambda name: RemoteController(name, ip='127.0.0.1'), link=TCLink) + + net.start() + net.staticArp() + + print('Custom Pentagon Topology is up with static ARP.') + CLI(net) + net.stop() diff --git a/src/tests/ryu-openflow/data/ietf-l3vpn-service.json b/src/tests/ryu-openflow/data/ietf-l3vpn-service.json new file mode 100644 index 0000000000000000000000000000000000000000..eb86c359bbab1feeba0f570be165f5f18349d2b8 --- /dev/null +++ b/src/tests/ryu-openflow/data/ietf-l3vpn-service.json @@ -0,0 +1,103 @@ +{ + "ietf-l3vpn-svc:l3vpn-svc": { + "vpn-services": { + "vpn-service": [ + {"vpn-id": "ietf-l3vpn-svc"} + ] + }, + "sites": { + "site": [ + { + "site-id": "site_DC1", + "management": {"type": "ietf-l3vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC1"}]}, + "devices": {"device": [{"device-id": "h1","location": "DC1"}]}, + "site-network-accesses": { + "site-network-access": [ + { + "site-network-access-id": "h1-eth0", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "device-reference": "h1", + "vpn-attachment": {"vpn-id": "ietf-l3vpn-svc", "site-role": "ietf-l3vpn-svc:spoke-role"}, + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "provider-address": "10.0.0.1", + "customer-address": "10.0.0.1", + "prefix-length": 8 + } + } + }, + "service": { + "svc-mtu": 1500, + "svc-input-bandwidth": 1000000000, + "svc-output-bandwidth": 1000000000, + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": {"latency-boundary": 10}, + "bandwidth": {"guaranteed-bw-percent": 100} + } + ] + } + } + } + } + } + ] + } + }, + { + "site-id": "site_DC2", + "management": {"type": "ietf-l3vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC2"}]}, + "devices": {"device": [{"device-id": "h3", "location": "DC2"}]}, + "site-network-accesses": { + "site-network-access": [ + { + "site-network-access-id": "h3-eth0", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "device-reference": "h3", + "vpn-attachment": {"vpn-id": "ietf-l3vpn-svc", "site-role": "ietf-l3vpn-svc:hub-role"}, + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "provider-address": "10.0.0.1", + "customer-address": "10.0.0.3", + "prefix-length": 8 + } + } + }, + "service": { + "svc-mtu": 1500, + "svc-input-bandwidth": 1000000000, + "svc-output-bandwidth": 1000000000, + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "class-id": 
"qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": {"latency-boundary": 10}, + "bandwidth": {"guaranteed-bw-percent": 100} + } + ] + } + } + } + } + } + ] + } + } + ] + } + } +} diff --git a/src/tests/ryu-openflow/data/tfs-topology.json b/src/tests/ryu-openflow/data/tfs-topology.json new file mode 100644 index 0000000000000000000000000000000000000000..e427f16ea95833a1480a84c4fe83d7539c2707ac --- /dev/null +++ b/src/tests/ryu-openflow/data/tfs-topology.json @@ -0,0 +1,125 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + { + "topology_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + } + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "RYU"}}, "device_type": "openflow-ryu-controller", + "device_drivers": ["DEVICEDRIVER_RYU"], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8080"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"timeout": 120}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "h1"}}, "device_type": "emu-client", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "h1-eth0", "type": "copper/internal"}, + {"uuid": "int", "type": "copper/internal"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "h2"}}, "device_type": "emu-client", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "h2-eth0", "type": "copper/internal"}, + {"uuid": "int", "type": "copper/internal"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "h3"}}, "device_type": "emu-client", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "h3-eth0", "type": "copper/internal"}, + {"uuid": "int", "type": "copper/internal"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "h4"}}, "device_type": "emu-client", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "h4-eth0", "type": "copper/internal"}, + {"uuid": "int", "type": "copper/internal"} + ]}}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "h1/h1-eth0==0000000000000002/s2-eth3"}}, + "link_endpoint_ids": [ + 
{"device_id": {"device_uuid": {"uuid": "h1"}}, "endpoint_uuid": {"uuid": "h1-eth0"}}, + {"device_id": {"device_uuid": {"uuid": "0000000000000002"}}, "endpoint_uuid": {"uuid": "s2-eth3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "0000000000000002/s2-eth3==h1/h1-eth0"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "0000000000000002"}}, "endpoint_uuid": {"uuid": "s2-eth3"}}, + {"device_id": {"device_uuid": {"uuid": "h1"}}, "endpoint_uuid": {"uuid": "h1-eth0"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "h2/h2-eth0==0000000000000002/s2-eth4"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "h2"}}, "endpoint_uuid": {"uuid": "h2-eth0"}}, + {"device_id": {"device_uuid": {"uuid": "0000000000000002"}}, "endpoint_uuid": {"uuid": "s2-eth4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "0000000000000002/s2-eth4==h2/h2-eth0"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "0000000000000002"}}, "endpoint_uuid": {"uuid": "s2-eth4"}}, + {"device_id": {"device_uuid": {"uuid": "h2"}}, "endpoint_uuid": {"uuid": "h2-eth0"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "h3/h3-eth0==0000000000000005/s5-eth3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "h3"}}, "endpoint_uuid": {"uuid": "h3-eth0"}}, + {"device_id": {"device_uuid": {"uuid": "0000000000000005"}}, "endpoint_uuid": {"uuid": "s5-eth3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "0000000000000005/s5-eth3==h3/h3-eth0"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "0000000000000005"}}, "endpoint_uuid": {"uuid": "s5-eth3"}}, + {"device_id": {"device_uuid": {"uuid": "h3"}}, "endpoint_uuid": {"uuid": "h3-eth0"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "h4/h4-eth0==0000000000000005/s5-eth4"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "h4"}}, "endpoint_uuid": {"uuid": "h4-eth0"}}, + {"device_id": {"device_uuid": {"uuid": "0000000000000005"}}, "endpoint_uuid": {"uuid": "s5-eth4"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "0000000000000005/s5-eth4==h4/h4-eth0"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "0000000000000005"}}, "endpoint_uuid": {"uuid": "s5-eth4"}}, + {"device_id": {"device_uuid": {"uuid": "h4"}}, "endpoint_uuid": {"uuid": "h4-eth0"}} + ] + } + ] +} diff --git a/src/tests/ryu-openflow/deploy_specs.sh b/src/tests/ryu-openflow/deploy_specs.sh new file mode 100755 index 0000000000000000000000000000000000000000..daef2fdc728f3b8243e463915166a92921364610 --- /dev/null +++ b/src/tests/ryu-openflow/deploy_specs.sh @@ -0,0 +1,214 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. 
+export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +#export TFS_COMPONENTS="context device pathcomp service slice nbi webui" +export TFS_COMPONENTS="context device pathcomp service nbi" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. 
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml"
+
+# Uncomment to monitor performance of components
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
+
+# Uncomment when deploying Optical CyberSecurity
+#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# Disable skip-build flag to rebuild the Docker images.
+export TFS_SKIP_BUILD=""
+
+
+# ----- CockroachDB ------------------------------------------------------------
+
+# Set the namespace where CockroachDB will be deployed.
+export CRDB_NAMESPACE="crdb"
+
+# Set the external port the CockroachDB PostgreSQL interface will be exposed to.
+export CRDB_EXT_PORT_SQL="26257"
+
+# Set the external port the CockroachDB HTTP Mgmt GUI interface will be exposed to.
+export CRDB_EXT_PORT_HTTP="8081"
+
+# Set the database username to be used by Context.
+export CRDB_USERNAME="tfs"
+
+# Set the database user's password to be used by Context.
+export CRDB_PASSWORD="tfs123"
+
+# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/crdb.sh for additional details
+export CRDB_DEPLOY_MODE="single"
+
+# Enable flag for dropping the database, if it exists.
+export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+
+# Disable flag for re-deploying CockroachDB from scratch.
+export CRDB_REDEPLOY=""
+
+
+# ----- NATS -------------------------------------------------------------------
+
+# Set the namespace where NATS will be deployed.
+export NATS_NAMESPACE="nats"
+
+# Set the external port the NATS Client interface will be exposed to.
+export NATS_EXT_PORT_CLIENT="4222"
+
+# Set the external port the NATS HTTP Mgmt GUI interface will be exposed to.
+export NATS_EXT_PORT_HTTP="8222"
+
+# Set NATS installation mode to 'single'. This option is convenient for development and testing.
+# See ./deploy/all.sh or ./deploy/nats.sh for additional details
+export NATS_DEPLOY_MODE="single"
+
+# Disable flag for re-deploying NATS from scratch.
+export NATS_REDEPLOY=""
+
+
+# ----- QuestDB ----------------------------------------------------------------
+
+# Set the namespace where QuestDB will be deployed.
+export QDB_NAMESPACE="qdb"
+
+# Set the external port the QuestDB PostgreSQL interface will be exposed to.
+export QDB_EXT_PORT_SQL="8812"
+
+# Set the external port the QuestDB Influx Line Protocol interface will be exposed to.
+export QDB_EXT_PORT_ILP="9009"
+
+# Set the external port the QuestDB HTTP Mgmt GUI interface will be exposed to.
+export QDB_EXT_PORT_HTTP="9000"
+
+# Set the database username to be used for QuestDB.
+export QDB_USERNAME="admin"
+
+# Set the database user's password to be used for QuestDB.
+export QDB_PASSWORD="quest"
+
+# Set the table name to be used by Monitoring for KPIs.
+export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis"
+
+# Set the table name to be used by Slice for plotting groups.
+export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups"
+
+# Enable flag for dropping tables, if they exist.
+export QDB_DROP_TABLES_IF_EXIST="YES"
+
+# Disable flag for re-deploying QuestDB from scratch.
+export QDB_REDEPLOY=""
+
+
+# ----- K8s Observability ------------------------------------------------------
+
+# Set the external port the Prometheus Mgmt HTTP GUI interface will be exposed to.
+export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_SERVER_PORT="9092" + +# Set the flag to YES for redeploying of Apache Kafka +export KFK_REDEPLOY="" diff --git a/src/tests/ryu-openflow/docker-compose.yml b/src/tests/ryu-openflow/docker-compose.yml new file mode 100644 index 0000000000000000000000000000000000000000..adb37e61d6b92744529fbe7fd8028e76f503d99a --- /dev/null +++ b/src/tests/ryu-openflow/docker-compose.yml @@ -0,0 +1,20 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +services: + ryu: + network_mode: host + build: + context: . + dockerfile: Ryu.Dockerfile diff --git a/src/tests/ryu-openflow/redeploy-tfs.sh b/src/tests/ryu-openflow/redeploy-tfs.sh new file mode 100755 index 0000000000000000000000000000000000000000..7b1b26750b670e3d2e90306876056f6261dd9cf1 --- /dev/null +++ b/src/tests/ryu-openflow/redeploy-tfs.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source ~/tfs-ctrl/src/tests/ryu-openflow/deploy_specs.sh +./deploy/all.sh diff --git a/src/tests/ryu-openflow/requirements.in b/src/tests/ryu-openflow/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..d733b0adb03957eb23b685c8a901cdc4b3a6d8d6 --- /dev/null +++ b/src/tests/ryu-openflow/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
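+
+# NOTE: requests is the only test-specific dependency; the 2.27.* pin presumably mirrors
+# the version used by the other TFS end-to-end test suites.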
+ +requests==2.27.* diff --git a/src/tests/ryu-openflow/scripts/run-cleanup.sh b/src/tests/ryu-openflow/scripts/run-cleanup.sh new file mode 100755 index 0000000000000000000000000000000000000000..042511da1c5aad21bb026109ffb1cc50140a1ee0 --- /dev/null +++ b/src/tests/ryu-openflow/scripts/run-cleanup.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_cleanup.xml \ + /var/teraflow/tests/ryu-openflow/tests/test_cleanup.py diff --git a/src/tests/ryu-openflow/scripts/run-onboarding.sh b/src/tests/ryu-openflow/scripts/run-onboarding.sh new file mode 100755 index 0000000000000000000000000000000000000000..6023b1e5a886fcd668e3a16b99235853bbf8f12d --- /dev/null +++ b/src/tests/ryu-openflow/scripts/run-onboarding.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_onboarding.xml \ + /var/teraflow/tests/ryu-openflow/tests/test_onboarding.py diff --git a/src/tests/ryu-openflow/scripts/run-service-ietf-create.sh b/src/tests/ryu-openflow/scripts/run-service-ietf-create.sh new file mode 100755 index 0000000000000000000000000000000000000000..11a23c74a7330882e57eba75485396ac37e60c54 --- /dev/null +++ b/src/tests/ryu-openflow/scripts/run-service-ietf-create.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
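+
+# Runs the IETF L3VPN service-creation test inside the test container; the JUnit report is
+# written to /opt/results, which the CI job bind-mounts from src/tests/${TEST_NAME}.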
+ +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_ietf_create.xml \ + /var/teraflow/tests/ryu-openflow/tests/test_service_ietf_create.py diff --git a/src/tests/ryu-openflow/scripts/run-service-ietf-remove.sh b/src/tests/ryu-openflow/scripts/run-service-ietf-remove.sh new file mode 100755 index 0000000000000000000000000000000000000000..7532cd9cc40264aae1b3700467e2b4fbb6154589 --- /dev/null +++ b/src/tests/ryu-openflow/scripts/run-service-ietf-remove.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_ietf_remove.xml \ + /var/teraflow/tests/ryu-openflow/tests/test_service_ietf_remove.py diff --git a/src/tests/ryu-openflow/tests/Fixtures.py b/src/tests/ryu-openflow/tests/Fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..15978851faae668339fa4eed6db8ab7e1be2eb5e --- /dev/null +++ b/src/tests/ryu-openflow/tests/Fixtures.py @@ -0,0 +1,43 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
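+
+# Session-scoped gRPC client fixtures shared by the test modules below; each client resolves
+# its component endpoint from the TFS runtime environment variables sourced by the run-*.sh
+# scripts.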
+ +import pytest +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from monitoring.client.MonitoringClient import MonitoringClient +from service.client.ServiceClient import ServiceClient + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def monitoring_client(): + _client = MonitoringClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + _client.close() diff --git a/src/tests/ryu-openflow/tests/Tools.py b/src/tests/ryu-openflow/tests/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..f6ecf468477307572a8043e75833e5a9f925405b --- /dev/null +++ b/src/tests/ryu-openflow/tests/Tools.py @@ -0,0 +1,109 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, logging, requests +from typing import Any, Dict, List, Optional, Set, Union +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_http + +NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) +NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) +NBI_USERNAME = 'admin' +NBI_PASSWORD = 'admin' +NBI_BASE_URL = '' + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], + requests.codes['CREATED' ], + requests.codes['ACCEPTED' ], + requests.codes['NO_CONTENT'], +} + +def do_rest_request( + method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( + NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url + ) + + if logger is not None: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + logger.warning(msg) + reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects) + if logger is not None: + logger.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) + + if reply.content and len(reply.content) > 0: return reply.json() + return None + +def do_rest_get_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> 
Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.GET, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_post_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_put_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_patch_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_delete_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) diff --git a/src/tests/ryu-openflow/tests/__init__.py b/src/tests/ryu-openflow/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14 --- /dev/null +++ b/src/tests/ryu-openflow/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ryu-openflow/tests/test_cleanup.py b/src/tests/ryu-openflow/tests/test_cleanup.py new file mode 100644 index 0000000000000000000000000000000000000000..0ee3e7ed3b3b0b11c58025a7ba305e529b053c1a --- /dev/null +++ b/src/tests/ryu-openflow/tests/test_cleanup.py @@ -0,0 +1,44 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, os
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId
+from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from .Fixtures import context_client, device_client    # pylint: disable=unused-import
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json')
+ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))
+
+def test_scenario_cleanup(
+    context_client : ContextClient,  # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,    # pylint: disable=redefined-outer-name
+) -> None:
+    # Verify the scenario has no services/slices
+    response = context_client.GetContext(ADMIN_CONTEXT_ID)
+    assert len(response.service_ids) == 0
+    assert len(response.slice_ids) == 0
+
+    # Check the base scenario is still in place, then unload it and verify it is empty
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    descriptor_loader.validate()
+    descriptor_loader.unload()
+    validate_empty_scenario(context_client)
diff --git a/src/tests/ryu-openflow/tests/test_onboarding.py b/src/tests/ryu-openflow/tests/test_onboarding.py
new file mode 100644
index 0000000000000000000000000000000000000000..0eb5e6e885b1dfa896bf2197a529dac35033ac65
--- /dev/null
+++ b/src/tests/ryu-openflow/tests/test_onboarding.py
@@ -0,0 +1,67 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
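+
+# Onboards data/tfs-topology.json through the DescriptorLoader, then polls the Context
+# component until every device reports DEVICEOPERATIONALSTATUS_ENABLED (up to 10 retries).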
+ +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_onboarding( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 + assert num_devices_enabled == num_devices diff --git a/src/tests/ryu-openflow/tests/test_service_ietf_create.py b/src/tests/ryu-openflow/tests/test_service_ietf_create.py new file mode 100644 index 0000000000000000000000000000000000000000..83bcedf266d2c48f6734f6c23d9914cc673be970 --- /dev/null +++ b/src/tests/ryu-openflow/tests/test_service_ietf_create.py @@ -0,0 +1,71 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
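+
+# Creates the L3VPN by POSTing data/ietf-l3vpn-service.json to the NBI RESTCONF endpoint,
+# then cross-checks through Context that exactly one ACTIVE L3NM service with a single
+# connection exists.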
+ +import json, logging, os +from typing import Dict +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_get_request, do_rest_post_request + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l3vpn-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +# pylint: disable=redefined-outer-name, unused-argument +def test_service_ietf_creation( + context_client : ContextClient, +): + # Issue service creation request + with open(REQUEST_FILE, 'r', encoding='UTF-8') as f: + svc1_data = json.load(f) + URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services' + do_rest_post_request(URL, body=svc1_data, logger=LOGGER, expected_status_codes={201}) + vpn_id = svc1_data['ietf-l3vpn-svc:l3vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] + + URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id) + service_data = do_rest_get_request(URL, logger=LOGGER, expected_status_codes={200}) + service_uuid = service_data['service-id'] + + # Verify service was created + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) + assert len(response.services) == 1 + + for service in response.services: + service_id = service.service_id + assert service_id.service_uuid.uuid == service_uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 diff --git a/src/tests/ryu-openflow/tests/test_service_ietf_remove.py b/src/tests/ryu-openflow/tests/test_service_ietf_remove.py new file mode 100644 index 0000000000000000000000000000000000000000..fa466524b956e9f33cc669b8c0f49694ed6c7513 --- /dev/null +++ b/src/tests/ryu-openflow/tests/test_service_ietf_remove.py @@ -0,0 +1,77 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
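+
+# Deletes the previously created L3VPN through the NBI RESTCONF endpoint and verifies the
+# scenario returns to zero services and slices.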
+ +import logging, os +from typing import Dict, Set, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_delete_request + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l3vpn-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +# pylint: disable=redefined-outer-name, unused-argument +def test_service_ietf_removal( + context_client : ContextClient, # pylint: disable=redefined-outer-name +): + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 + + service_uuids : Set[str] = set() + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L3NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 + + service_uuids.add(service_id.service_uuid.uuid) + + # Identify service to delete + assert len(service_uuids) == 1 + service_uuid = set(service_uuids).pop() + + URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) + do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204}) + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 diff --git a/src/webui/service/static/topology_icons/openflow-ryu-controller.png b/src/webui/service/static/topology_icons/openflow-ryu-controller.png new file mode 100644 index 0000000000000000000000000000000000000000..2982c57308983b367f1fa13c559fb702edcbadfe Binary files /dev/null and b/src/webui/service/static/topology_icons/openflow-ryu-controller.png differ
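For quick manual verification of the controller side while the scenario is up, the REST API exposed by the Ryu container (port 8080, per Ryu.Dockerfile above) can be queried directly. A minimal sketch, assuming Ryu is reachable at 127.0.0.1:8080; `/stats/switches` is served by `ryu.app.ofctl_rest`, and the `/v1.0/topology/*` endpoints by the topology REST API that `ryu.app.gui_topology` loads:

```python
# Minimal sketch: query the Ryu REST API started by Ryu.Dockerfile.
# Assumes the compose service is up and the Mininet topology is connected.
import requests

RYU_BASE_URL = 'http://127.0.0.1:8080'

def get_json(path : str):
    reply = requests.get(RYU_BASE_URL + path, timeout=10)
    reply.raise_for_status()
    return reply.json()

if __name__ == '__main__':
    # ofctl_rest: list of connected datapath ids; expect 5 entries for the pentagon topology
    print('datapaths:', get_json('/stats/switches'))

    # topology REST API: switches/links discovered via LLDP (requires --observe-links)
    print('switches :', len(get_json('/v1.0/topology/switches')))
    print('links    :', len(get_json('/v1.0/topology/links')))
```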