diff --git a/hackfest/mock_osm/__main__.py b/hackfest/mock_osm/__main__.py index 8d4d101118aeb81bcb0d3882affce354b644102d..28227f47b9cf3eaa7980e445d2d09cac0d26d302 100644 --- a/hackfest/mock_osm/__main__.py +++ b/hackfest/mock_osm/__main__.py @@ -15,6 +15,7 @@ import cmd, logging from .MockOSM import MockOSM +logging.basicConfig(level=logging.DEBUG) LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) diff --git a/src/device/service/driver_api/DriverFactory.py b/src/device/service/driver_api/DriverFactory.py index 38ae0ac562bce400ce4fd57d06c93d09a682a3a0..fefa3cd919bf3ec13302c13b8cac703a5faf948c 100644 --- a/src/device/service/driver_api/DriverFactory.py +++ b/src/device/service/driver_api/DriverFactory.py @@ -14,8 +14,7 @@ import logging from enum import Enum -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Type -from ._Driver import _Driver +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type from .Exceptions import ( AmbiguousFilterException, EmptyFilterFieldException, UnsatisfiedFilterException, UnsupportedDriverClassException, @@ -23,12 +22,20 @@ from .Exceptions import ( ) from .FilterFields import FILTER_FIELD_ALLOWED_VALUES, FilterFieldEnum +if TYPE_CHECKING: + from ._Driver import _Driver + LOGGER = logging.getLogger(__name__) SUPPORTED_FILTER_FIELDS = set(FILTER_FIELD_ALLOWED_VALUES.keys()) +def check_is_class_valid(driver_class : Type['_Driver']) -> None: + from ._Driver import _Driver + if not issubclass(driver_class, _Driver): + raise UnsupportedDriverClassException(str(driver_class)) + def sanitize_filter_fields( filter_fields : Dict[FilterFieldEnum, Any], driver_name : Optional[str] = None ) -> Dict[FilterFieldEnum, Any]: @@ -67,14 +74,13 @@ def sanitize_filter_fields( class DriverFactory: def __init__( - self, drivers : List[Tuple[Type[_Driver], List[Dict[FilterFieldEnum, Any]]]] + self, drivers : List[Tuple[Type['_Driver'], List[Dict[FilterFieldEnum, Any]]]] ) -> None: - 
self.__drivers : List[Tuple[Type[_Driver], Dict[FilterFieldEnum, Any]]] = list() + self.__drivers : List[Tuple[Type['_Driver'], Dict[FilterFieldEnum, Any]]] = list() for driver_class,filter_field_sets in drivers: - #if not issubclass(driver_class, _Driver): - # raise UnsupportedDriverClassException(str(driver_class)) - driver_name = driver_class #.__name__ + check_is_class_valid(driver_class) + driver_name = driver_class.__name__ for filter_fields in filter_field_sets: filter_fields = {k.value:v for k,v in filter_fields.items()} @@ -86,7 +92,7 @@ class DriverFactory: def is_driver_compatible( self, driver_filter_fields : Dict[FilterFieldEnum, Any], - selection_filter_fields : Dict[FilterFieldEnum, Any] + selection_filter_fields : Dict[FilterFieldEnum, Any] ) -> bool: # by construction empty driver_filter_fields are not allowed # by construction empty selection_filter_fields are not allowed @@ -102,7 +108,7 @@ class DriverFactory: return True - def get_driver_class(self, **selection_filter_fields) -> _Driver: + def get_driver_class(self, **selection_filter_fields) -> '_Driver': sanitized_filter_fields = sanitize_filter_fields(selection_filter_fields) compatible_drivers : List[Tuple[Type[_Driver], Dict[FilterFieldEnum, Any]]] = [ diff --git a/src/device/service/driver_api/Exceptions.py b/src/device/service/driver_api/Exceptions.py index 8f33ebc57d040b3906997cfbc73a1d79a02dab41..86c2afcef1833efa0311278318388ebe930abbf5 100644 --- a/src/device/service/driver_api/Exceptions.py +++ b/src/device/service/driver_api/Exceptions.py @@ -13,22 +13,22 @@ # limitations under the License. 
class UnsatisfiedFilterException(Exception): - def __init__(self, filter_fields): + def __init__(self, filter_fields) -> None: msg = 'No Driver satisfies FilterFields({:s})' super().__init__(msg.format(str(filter_fields))) class AmbiguousFilterException(Exception): - def __init__(self, filter_fields, compatible_drivers): + def __init__(self, filter_fields, compatible_drivers) -> None: msg = 'Multiple Drivers satisfy FilterFields({:s}): {:s}' super().__init__(msg.format(str(filter_fields), str(compatible_drivers))) class UnsupportedDriverClassException(Exception): - def __init__(self, driver_class_name): + def __init__(self, driver_class_name) -> None: msg = 'Class({:s}) is not a subclass of _Driver' super().__init__(msg.format(str(driver_class_name))) class EmptyFilterFieldException(Exception): - def __init__(self, filter_fields, driver_class_name=None): + def __init__(self, filter_fields, driver_class_name=None) -> None: if driver_class_name: msg = 'Empty FilterField({:s}) specified by Driver({:s}) is not supported' msg = msg.format(str(filter_fields), str(driver_class_name)) @@ -38,7 +38,7 @@ class EmptyFilterFieldException(Exception): super().__init__(msg) class UnsupportedFilterFieldException(Exception): - def __init__(self, unsupported_filter_fields, driver_class_name=None): + def __init__(self, unsupported_filter_fields, driver_class_name=None) -> None: if driver_class_name: msg = 'FilterFields({:s}) specified by Driver({:s}) are not supported' msg = msg.format(str(unsupported_filter_fields), str(driver_class_name)) @@ -48,7 +48,9 @@ class UnsupportedFilterFieldException(Exception): super().__init__(msg) class UnsupportedFilterFieldValueException(Exception): - def __init__(self, filter_field_name, filter_field_value, allowed_filter_field_values, driver_class_name=None): + def __init__( + self, filter_field_name, filter_field_value, allowed_filter_field_values, driver_class_name=None + ) -> None: if driver_class_name: msg = 'FilterField({:s}={:s}) specified by 
Driver({:s}) is not supported. Allowed values are {:s}' msg = msg.format( @@ -60,24 +62,24 @@ class UnsupportedFilterFieldValueException(Exception): super().__init__(msg) class DriverInstanceCacheTerminatedException(Exception): - def __init__(self): + def __init__(self) -> None: msg = 'DriverInstanceCache is terminated. No new instances can be processed.' super().__init__(msg) class UnsupportedResourceKeyException(Exception): - def __init__(self, resource_key): + def __init__(self, resource_key) -> None: msg = 'ResourceKey({:s}) not supported' msg = msg.format(str(resource_key)) super().__init__(msg) class ConfigFieldNotFoundException(Exception): - def __init__(self, config_field_name): + def __init__(self, config_field_name) -> None: msg = 'ConfigField({:s}) not specified in resource' msg = msg.format(str(config_field_name)) super().__init__(msg) class ConfigFieldsNotSupportedException(Exception): - def __init__(self, config_fields): + def __init__(self, config_fields) -> None: msg = 'ConfigFields({:s}) not supported in resource' msg = msg.format(str(config_fields)) super().__init__(msg) diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py b/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py new file mode 100644 index 0000000000000000000000000000000000000000..7a782b8816037ecaf25baa704bbed82fc6786b27 --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/InterfaceSwitchedVlan.py @@ -0,0 +1,104 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, re +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +RE_IF_SWITCHED_VLAN = re.compile(r'^/interface\[(?:name=)?([^\]]+)\]/ethernet/switched-vlan$') + +class InterfaceSwitchedVlanHandler(_Handler): + def get_resource_key(self) -> str: return '/interface/ethernet/switched-vlan' + def get_path(self) -> str: return '/openconfig-interfaces:interfaces/interface/ethernet/switched-vlan' + + def _get_interface_name(self, resource_key : str, resource_value : Dict) -> str: + if 'name' in resource_value: + return get_str(resource_value, 'name') + if 'interface' in resource_value: + return get_str(resource_value, 'interface') + match = RE_IF_SWITCHED_VLAN.match(resource_key) + if match is None: + MSG = 'Interface name not found in resource_key={:s} resource_value={:s}' + raise Exception(MSG.format(str(resource_key), str(resource_value))) + return match.groups()[0] + + def _normalize_config(self, resource_value : Dict) -> Dict[str, Any]: + config = resource_value.get('config') + if isinstance(config, dict): + return config + + interface_mode = resource_value.get('interface-mode', resource_value.get('interface_mode')) + if interface_mode is None: + raise Exception('interface-mode is required for switched-vlan config') + interface_mode = str(interface_mode).upper() + + config = {'interface-mode': interface_mode} + if interface_mode == 'ACCESS': + access_vlan = resource_value.get('access-vlan', 
resource_value.get('access_vlan')) + if access_vlan is None: + raise Exception('access-vlan is required for ACCESS mode') + config['access-vlan'] = int(access_vlan) + elif interface_mode == 'TRUNK': + native_vlan = resource_value.get('native-vlan', resource_value.get('native_vlan', 1)) + config['native-vlan'] = int(native_vlan) + trunk_vlans = resource_value.get('trunk-vlans', resource_value.get('trunk_vlans')) + if trunk_vlans is None: + trunk_vlan = resource_value.get('trunk-vlan', resource_value.get('trunk_vlan')) + trunk_vlans = [trunk_vlan] if trunk_vlan is not None else [] + if not isinstance(trunk_vlans, list): + trunk_vlans = [trunk_vlans] + config['trunk-vlans'] = [int(vlan) for vlan in trunk_vlans if vlan is not None] + else: + raise Exception('Unsupported interface-mode: {:s}'.format(str(interface_mode))) + + return config + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + if_name = self._get_interface_name(resource_key, resource_value) + str_path = '/interfaces/interface[name={:s}]/ethernet/switched-vlan'.format(if_name) + if delete: + return str_path, json.dumps({}) + + config = self._normalize_config(resource_value) + str_data = json.dumps({'config': config}) + return str_path, str_data + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + json_data_valid = yang_handler.parse_to_dict( + '/openconfig-interfaces:interfaces', json_data, fmt='json', strict=False + ) + + entries = [] + for interface in json_data_valid.get('interfaces', {}).get('interface', []): + interface_name = interface['name'] + ethernet = interface.get('ethernet', {}) + switched_vlan = ethernet.get('switched-vlan') + if switched_vlan is None: + continue + entry_key = '/interface[{:s}]/ethernet/switched-vlan'.format(interface_name) + entry_value = {} + if 'config' in switched_vlan: + entry_value['config'] = switched_vlan['config'] + if 
'state' in switched_vlan: + entry_value['state'] = switched_vlan['state'] + entries.append((entry_key, entry_value)) + return entries diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py b/src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py new file mode 100644 index 0000000000000000000000000000000000000000..cadf35ce8ffff5281d0e5aaecebb8380eb000e40 --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/Mpls.py @@ -0,0 +1,121 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging, re +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_int, get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +RE_MPLS_INTERFACE = re.compile(r'^/mpls/interface\[([^\]]+)\]$') +DEFAULT_NETWORK_INSTANCE = 'default' + +class MplsHandler(_Handler): + def get_resource_key(self) -> str: return '/mpls' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/mpls' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + """ + Compose MPLS (global or per-interface) configuration. + - Global: set LDP router-id (lsr-id) and optional hello timers. + - Interface: set LDP interface-id and optional hello timers. 
+ """ + ni_name = get_str(resource_value, 'network_instance', DEFAULT_NETWORK_INSTANCE) + ni_type = get_str(resource_value, 'network_instance_type') + if ni_type is None and ni_name == DEFAULT_NETWORK_INSTANCE: + ni_type = 'openconfig-network-instance-types:DEFAULT_INSTANCE' + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + yang_ni : Any = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name)) + yang_ni.create_path('config/name', ni_name) + if ni_type is not None: + yang_ni.create_path('config/type', ni_type) + + match_if = RE_MPLS_INTERFACE.match(resource_key) + if delete: + if match_if: + if_name = match_if.group(1) + str_path = ( + '/network-instances/network-instance[name={:s}]/mpls/signaling-protocols/ldp' + '/interface-attributes/interfaces/interface[interface-id={:s}]' + ).format(ni_name, if_name) + else: + str_path = '/network-instances/network-instance[name={:s}]/mpls'.format(ni_name) + return str_path, json.dumps({}) + + if match_if: + if_name = match_if.group(1) + hello_interval = get_int(resource_value, 'hello_interval') + hello_holdtime = get_int(resource_value, 'hello_holdtime') + + path_if_base = ( + 'mpls/signaling-protocols/ldp/interface-attributes/interfaces' + '/interface[interface-id="{:s}"]/config' + ).format(if_name) + yang_ni.create_path('{:s}/interface-id'.format(path_if_base), if_name) + if hello_interval is not None: + yang_ni.create_path('{:s}/hello-interval'.format(path_if_base), hello_interval) + if hello_holdtime is not None: + yang_ni.create_path('{:s}/hello-holdtime'.format(path_if_base), hello_holdtime) + + yang_if : Any = yang_ni.find_path( + 'mpls/signaling-protocols/ldp/interface-attributes/interfaces' + '/interface[interface-id="{:s}"]'.format(if_name) + ) + + str_path = ( + '/network-instances/network-instance[name={:s}]/mpls/signaling-protocols/ldp' + '/interface-attributes/interfaces/interface[interface-id={:s}]' + ).format(ni_name, if_name) + json_data = 
json.loads(yang_if.print_mem('json')) + json_data = json_data['openconfig-network-instance:interface'][0] + str_data = json.dumps(json_data) + return str_path, str_data + + # Global LDP configuration + ldp_cfg = resource_value.get('ldp', resource_value) + lsr_id = get_str(ldp_cfg, 'lsr_id') + hello_interval = get_int(ldp_cfg, 'hello_interval') + hello_holdtime = get_int(ldp_cfg, 'hello_holdtime') + + if lsr_id is not None: + yang_ni.create_path('mpls/signaling-protocols/ldp/global/config/lsr-id', lsr_id) + if hello_interval is not None: + yang_ni.create_path( + 'mpls/signaling-protocols/ldp/interface-attributes/config/hello-interval', hello_interval + ) + if hello_holdtime is not None: + yang_ni.create_path( + 'mpls/signaling-protocols/ldp/interface-attributes/config/hello-holdtime', hello_holdtime + ) + + yang_ldp : Any = yang_ni.find_path('mpls/signaling-protocols/ldp') + + str_path = '/network-instances/network-instance[name={:s}]/mpls/signaling-protocols/ldp'.format(ni_name) + json_data = json.loads(yang_ldp.print_mem('json')) + json_data = json_data['openconfig-network-instance:ldp'] + str_data = json.dumps(json_data) + return str_path, str_data + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + # Not required for current tests (L2VPN validation focuses on SetConfig). 
+ return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py new file mode 100644 index 0000000000000000000000000000000000000000..3ff259c5dd0b8e82d7aa0f112826a79c0312fe35 --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceConnectionPoint.py @@ -0,0 +1,57 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, logging +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class NetworkInstanceConnectionPointHandler(_Handler): + def get_resource_key(self) -> str: return '/network_instance/connection_point' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/connection-points/connection-point' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + ni_name = get_str(resource_value, 'name') + cp_id = get_str(resource_value, 'connection_point_id') + + str_path = ( + '/network-instances/network-instance[name={:s}]/connection-points' + '/connection-point[connection-point-id={:s}]' + ).format(ni_name, cp_id) + if delete: + return str_path, json.dumps({}) + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + path_cp_base = ( + 'network-instance[name="{:s}"]/connection-points' + '/connection-point[connection-point-id="{:s}"]' + ).format(ni_name, cp_id) + yang_nis.create_path('{:s}/config/connection-point-id'.format(path_cp_base), cp_id) + + yang_cp : Any = yang_nis.find_path(path_cp_base) + json_data = json.loads(yang_cp.print_mem('json')) + json_data = json_data['openconfig-network-instance:connection-point'][0] + return str_path, json.dumps(json_data) + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..81ffe524e77843348c140c897a88d5fa0e96f39d --- /dev/null +++ 
b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceEndpoint.py @@ -0,0 +1,89 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Any, Dict, List, Tuple +from ._Handler import _Handler +from .Tools import get_int, get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class NetworkInstanceEndpointHandler(_Handler): + def get_resource_key(self) -> str: return '/network_instance/connection_point/endpoint' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/connection-points/connection-point/endpoints/endpoint' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + ni_name = get_str(resource_value, 'name') + cp_id = get_str(resource_value, 'connection_point_id') + ep_id = get_str(resource_value, 'endpoint_id') + ep_type = get_str(resource_value, 'type') + precedence = get_int(resource_value, 'precedence') + + str_path = ( + '/network-instances/network-instance[name={:s}]/connection-points/connection-point' + '[connection-point-id={:s}]/endpoints/endpoint[endpoint-id={:s}]' + ).format(ni_name, cp_id, ep_id) + if delete: + return str_path, json.dumps({}) + + if ep_type is not None and ':' not in ep_type: + ep_type = 'openconfig-network-instance-types:{:s}'.format(ep_type) 
+ + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + path_ep_base = ( + 'network-instance[name="{:s}"]/connection-points/connection-point[connection-point-id="{:s}"]' + '/endpoints/endpoint[endpoint-id="{:s}"]' + ).format(ni_name, cp_id, ep_id) + yang_nis.create_path('{:s}/config/endpoint-id'.format(path_ep_base), ep_id) + if ep_type is not None: + yang_nis.create_path('{:s}/config/type'.format(path_ep_base), ep_type) + if precedence is not None: + yang_nis.create_path('{:s}/config/precedence'.format(path_ep_base), precedence) + + if ep_type and ep_type.endswith('LOCAL'): + if_name = get_str(resource_value, 'interface') + sif_index = get_int(resource_value, 'subinterface', 0) + if if_name is not None: + yang_nis.create_path('{:s}/local/config/interface'.format(path_ep_base), if_name) + yang_nis.create_path('{:s}/local/config/subinterface'.format(path_ep_base), sif_index) + site_id = get_int(resource_value, 'site_id') + if site_id is not None: + yang_nis.create_path('{:s}/local/config/site-id'.format(path_ep_base), site_id) + elif ep_type and ep_type.endswith('REMOTE'): + remote_system = get_str(resource_value, 'remote_system') + vc_id = get_int(resource_value, 'virtual_circuit_id') + if remote_system is not None: + yang_nis.create_path('{:s}/remote/config/remote-system'.format(path_ep_base), remote_system) + if vc_id is not None: + yang_nis.create_path( + '{:s}/remote/config/virtual-circuit-identifier'.format(path_ep_base), vc_id + ) + site_id = get_int(resource_value, 'site_id') + if site_id is not None: + yang_nis.create_path('{:s}/remote/config/site-id'.format(path_ep_base), site_id) + + yang_ep : Any = yang_nis.find_path(path_ep_base) + json_data = json.loads(yang_ep.print_mem('json')) + json_data = json_data['openconfig-network-instance:endpoint'][0] + return str_path, json.dumps(json_data) + + def parse( + self, json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + 
LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py new file mode 100644 index 0000000000000000000000000000000000000000..a9e3fb0092ba26106873b5e3793ea28a311f795b --- /dev/null +++ b/src/device/service/drivers/gnmi_openconfig/handlers/NetworkInstanceVlan.py @@ -0,0 +1,66 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, logging +from typing import Any, Dict, List, Tuple, Union +from ._Handler import _Handler +from .Tools import get_int, get_str +from .YangHandler import YangHandler + +LOGGER = logging.getLogger(__name__) + +class NetworkInstanceVlanHandler(_Handler): + def get_resource_key(self) -> str: return '/network_instance/vlan' + def get_path(self) -> str: + return '/openconfig-network-instance:network-instances/network-instance/vlans/vlan' + + def compose( + self, resource_key : str, resource_value : Dict, yang_handler : YangHandler, delete : bool = False + ) -> Tuple[str, str]: + ni_name = get_str(resource_value, 'name', 'default') + vlan_id = get_int(resource_value, 'vlan_id') + vlan_name = get_str(resource_value, 'vlan_name') + str_path = '/network-instances/network-instance[name={:s}]/vlans/vlan[vlan-id={:d}]'.format( + ni_name, vlan_id + ) + if delete: + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + yang_vlan = yang_nis.find_path('network-instance[name="{:s}"]/vlans/vlan[vlan-id="{:d}"]'.format( + ni_name, vlan_id)) + if yang_vlan is not None: + yang_vlan.unlink() + yang_vlan.free() + return str_path, json.dumps({}) + + yang_nis : Any = yang_handler.get_data_path('/openconfig-network-instance:network-instances') + yang_ni : Any = yang_nis.create_path('network-instance[name="{:s}"]'.format(ni_name)) + yang_ni.create_path('config/name', ni_name) + if ni_name == 'default': + yang_ni.create_path('config/type', 'openconfig-network-instance-types:DEFAULT_INSTANCE') + + yang_vlans : Any = yang_ni.create_path('vlans') + yang_vlan : Any = yang_vlans.create_path('vlan[vlan-id="{:d}"]'.format(vlan_id)) + yang_vlan.create_path('config/vlan-id', vlan_id) + if vlan_name is not None: + yang_vlan.create_path('config/name', vlan_name) + + json_data = json.loads(yang_vlan.print_mem('json')) + json_data = json_data['openconfig-network-instance:vlan'][0] + return str_path, json.dumps(json_data) + + def parse( + self, 
json_data : Dict, yang_handler : YangHandler + ) -> List[Tuple[str, Dict[str, Any]]]: + LOGGER.debug('[parse] json_data = %s', json.dumps(json_data)) + return [] diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py b/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py index 5e1ea3b4389b69eccfc89256218b35bf1c02aeb8..684e09efb1ba67eabeed341a1d65c6aab1a3b2fd 100644 --- a/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py +++ b/src/device/service/drivers/gnmi_openconfig/handlers/YangHandler.py @@ -42,6 +42,8 @@ YANG_MODULES = [ 'openconfig-types', 'openconfig-policy-types', 'openconfig-mpls-types', + 'openconfig-mpls', + 'openconfig-mpls-ldp', 'openconfig-network-instance-types', 'openconfig-network-instance', 'openconfig-acl', diff --git a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py index 3ce655353ecf78360931a008f7c69bb732749ea0..ab09a6c5446934d630735b82e80b2021b1328b4a 100644 --- a/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py +++ b/src/device/service/drivers/gnmi_openconfig/handlers/__init__.py @@ -18,11 +18,16 @@ from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_INTER from ._Handler import _Handler from .Component import ComponentHandler from .Interface import InterfaceHandler +from .InterfaceSwitchedVlan import InterfaceSwitchedVlanHandler from .InterfaceCounter import InterfaceCounterHandler from .NetworkInstance import NetworkInstanceHandler from .NetworkInstanceInterface import NetworkInstanceInterfaceHandler from .NetworkInstanceProtocol import NetworkInstanceProtocolHandler from .NetworkInstanceStaticRoute import NetworkInstanceStaticRouteHandler +from .NetworkInstanceConnectionPoint import NetworkInstanceConnectionPointHandler +from .NetworkInstanceEndpoint import NetworkInstanceEndpointHandler +from .NetworkInstanceVlan import NetworkInstanceVlanHandler +from .Mpls import 
MplsHandler from .Acl import AclHandler from .Tools import get_schema from .YangHandler import YangHandler @@ -31,11 +36,16 @@ LOGGER = logging.getLogger(__name__) comph = ComponentHandler() ifaceh = InterfaceHandler() +ifsvh = InterfaceSwitchedVlanHandler() ifctrh = InterfaceCounterHandler() nih = NetworkInstanceHandler() niifh = NetworkInstanceInterfaceHandler() niph = NetworkInstanceProtocolHandler() nisrh = NetworkInstanceStaticRouteHandler() +nicph = NetworkInstanceConnectionPointHandler() +nieph = NetworkInstanceEndpointHandler() +nivlh = NetworkInstanceVlanHandler() +mplsh = MplsHandler() aclh = AclHandler() ALL_RESOURCE_KEYS = [ @@ -49,6 +59,11 @@ RESOURCE_KEY_MAPPER = { RESOURCE_ENDPOINTS : comph.get_resource_key(), RESOURCE_INTERFACES : ifaceh.get_resource_key(), RESOURCE_NETWORK_INSTANCES : nih.get_resource_key(), + '/interface' : ifaceh.get_resource_key(), + '/interface/ethernet/switched-vlan' : ifsvh.get_resource_key(), + '/mpls' : mplsh.get_resource_key(), + '/network_instance/vlan' : nivlh.get_resource_key(), + '/mpls/interface' : mplsh.get_resource_key(), RESOURCE_ACL : aclh.get_resource_key(), } @@ -56,29 +71,50 @@ PATH_MAPPER = { '/components' : comph.get_path(), '/components/component' : comph.get_path(), '/interfaces' : ifaceh.get_path(), + '/interfaces/interface/ethernet/switched-vlan' + : ifsvh.get_path(), '/network-instances' : nih.get_path(), + '/network-instances/network-instance/connection-points/connection-point' + : nicph.get_path(), + '/network-instances/network-instance/connection-points/connection-point/endpoints/endpoint' + : nieph.get_path(), + '/network-instances/network-instance/vlans/vlan' + : nivlh.get_path(), + '/mpls' : mplsh.get_path(), + '/network-instances/network-instance/mpls' + : mplsh.get_path(), '/acl' : aclh.get_path(), } RESOURCE_KEY_TO_HANDLER = { comph.get_resource_key() : comph, ifaceh.get_resource_key() : ifaceh, + ifsvh.get_resource_key() : ifsvh, ifctrh.get_resource_key() : ifctrh, nih.get_resource_key() : nih, 
niifh.get_resource_key() : niifh, niph.get_resource_key() : niph, nisrh.get_resource_key() : nisrh, + nicph.get_resource_key() : nicph, + nieph.get_resource_key() : nieph, + nivlh.get_resource_key() : nivlh, + mplsh.get_resource_key() : mplsh, aclh.get_resource_key() : aclh, } PATH_TO_HANDLER = { comph.get_path() : comph, ifaceh.get_path() : ifaceh, + ifsvh.get_path() : ifsvh, ifctrh.get_path() : ifctrh, nih.get_path() : nih, niifh.get_path() : niifh, niph.get_path() : niph, nisrh.get_path() : nisrh, + nicph.get_path() : nicph, + nieph.get_path() : nieph, + nivlh.get_path() : nivlh, + mplsh.get_path() : mplsh, aclh.get_path() : aclh, } diff --git a/src/device/tests/gnmi_openconfig/Dockerfile.l2vpn b/src/device/tests/gnmi_openconfig/Dockerfile.l2vpn new file mode 100644 index 0000000000000000000000000000000000000000..2270ed3ed2d6210950ebfab5bc0a0e7a93643e5c --- /dev/null +++ b/src/device/tests/gnmi_openconfig/Dockerfile.l2vpn @@ -0,0 +1,85 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Minimal reuse of device component Dockerfile steps to run L2VPN tests +FROM python:3.9-slim + +# Base deps + libyang build +RUN apt-get update -qq && apt-get install -y -qq wget g++ git build-essential cmake libpcre2-dev python3-dev python3-cffi && rm -rf /var/lib/apt/lists/* +RUN mkdir -p /var/libyang && git clone https://github.com/CESNET/libyang.git /var/libyang +WORKDIR /var/libyang +RUN git fetch && git checkout v2.1.148 && mkdir -p build +WORKDIR /var/libyang/build +RUN cmake -D CMAKE_BUILD_TYPE:String="Release" .. && make && make install && ldconfig + +ENV PYTHONUNBUFFERED=0 + +# Python toolchain +RUN python3 -m pip install --upgrade 'pip==25.2' +RUN python3 -m pip install --upgrade 'setuptools==79.0.0' 'wheel==0.45.1' +RUN python3 -m pip install --upgrade 'pip-tools==7.3.0' + +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Common files + proto +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Device deps +WORKDIR /var/teraflow/device +COPY src/device/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Source tree +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/device/. 
device/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. monitoring/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ + +# OpenConfig models as in device Dockerfile +RUN mkdir -p /tmp/openconfig && git clone https://github.com/openconfig/public.git /tmp/openconfig +WORKDIR /tmp/openconfig +RUN git fetch && git checkout v4.4.0 +RUN rm -rf /var/teraflow/device/service/drivers/gnmi_openconfig/git +RUN mkdir -p /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public +RUN mv /tmp/openconfig/release /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public +RUN mv /tmp/openconfig/third_party /var/teraflow/device/service/drivers/gnmi_openconfig/git/openconfig/public +RUN rm -rf /tmp/openconfig +WORKDIR /var/teraflow + +ENV RUN_L2VPN_LAB=1 +ENV PYTHONPATH=/var/teraflow +CMD ["pytest", "--log-level=DEBUG", "--verbose", "device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py"] diff --git a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py index f5bee3c12a03925fd838c3576eac39ac4144c1cf..24341f7a8b8c8b2302996534d4fe246236c8a0ad 100644 --- a/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py +++ b/src/device/tests/gnmi_openconfig/test_unitary_gnmi_oc_arista_l2vpn.py @@ -12,565 +12,144 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, os, pytest, time -from typing import Dict, Tuple -os.environ['DEVICE_EMULATED_ONLY'] = 'YES' +""" +Integration validation of GnmiOpenConfigDriver for L2VPN (VPLS) over MPLS/LDP +using the ContainerLab dataplane (dc1--r1--r2--dc2). 
+""" -# pylint: disable=wrong-import-position +import grpc, logging, os, pytest, time +from typing import Dict, List, Tuple from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver -#from device.service.driver_api._Driver import ( -# RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES -#) - -logging.basicConfig(level=logging.DEBUG) -#logging.getLogger('ncclient.operations.rpc').setLevel(logging.INFO) -#logging.getLogger('ncclient.transport.parser').setLevel(logging.INFO) - -LOGGER = logging.getLogger(__name__) - - -##### DRIVERS FIXTURE ################################################################################################## - -DEVICES = { - 'SW1': {'address': '172.20.20.101', 'port': 6030, 'settings': { - 'username': 'admin', 'password': 'admin', - 'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False, - 'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120} - }}, - 'SW2': {'address': '10.1.1.87', 'port': 830, 'settings': { - 'username': 'ocnos', 'password': 'ocnos', - 'vendor': None, 'force_running': False, 'hostkey_verify': False, 'look_for_keys': False, 'allow_agent': False, - 'commit_per_rule': True, 'device_params': {'name': 'default'}, 'manager_params': {'timeout' : 120} - }}, -} - -@pytest.fixture(scope='session') -def drivers() -> Dict[str, OpenConfigDriver]: - _drivers : Dict[str, OpenConfigDriver] = dict() - for device_name, driver_params in DEVICES.items(): - driver = OpenConfigDriver(driver_params['address'], driver_params['port'], **(driver_params['settings'])) - driver.Connect() - _drivers[device_name] = driver - yield _drivers - time.sleep(1) - for _,driver in _drivers.items(): - driver.Disconnect() - - -def network_instance(ni_name, ni_type, ni_router_id=None, ni_route_distinguisher=None) -> Tuple[str, Dict]: - path = 
'/network_instance[{:s}]'.format(ni_name) - data = {'name': ni_name, 'type': ni_type} - if ni_router_id is not None: data['router_id'] = ni_router_id - if ni_route_distinguisher is not None: data['route_distinguisher'] = ni_route_distinguisher - return path, json.dumps(data) - -def network_instance_add_protocol_bgp(ni_name, ni_type, ni_router_id, ni_bgp_as, neighbors=[]) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[BGP]'.format(ni_name) - data = { - 'name': ni_name, 'type': ni_type, 'router_id': ni_router_id, 'identifier': 'BGP', - 'protocol_name': ni_bgp_as, 'as': ni_bgp_as - } - if len(neighbors) > 0: - data['neighbors'] = [ - {'ip_address': neighbor_ip_address, 'remote_as': neighbor_remote_as} - for neighbor_ip_address, neighbor_remote_as in neighbors - ] - return path, json.dumps(data) - -def network_instance_add_protocol_direct(ni_name, ni_type) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[DIRECTLY_CONNECTED]'.format(ni_name) - data = { - 'name': ni_name, 'type': ni_type, 'identifier': 'DIRECTLY_CONNECTED', - 'protocol_name': 'DIRECTLY_CONNECTED' - } - return path, json.dumps(data) - -def network_instance_add_protocol_static(ni_name, ni_type) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/protocols[STATIC]'.format(ni_name) - data = { - 'name': ni_name, 'type': ni_type, 'identifier': 'STATIC', - 'protocol_name': 'STATIC' - } - return path, json.dumps(data) - -#def network_instance_static_route(ni_name, prefix, next_hop, next_hop_index=0) -> Tuple[str, Dict]: -# path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix) -# data = {'name': ni_name, 'prefix': prefix, 'next_hop': next_hop, 'next_hop_index': next_hop_index} -# return path, json.dumps(data) - -def network_instance_add_table_connection( - ni_name, src_protocol, dst_protocol, address_family, default_import_policy, bgp_as=None -) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/table_connections[{:s}][{:s}][{:s}]'.format( - ni_name, 
src_protocol, dst_protocol, address_family - ) - data = { - 'name': ni_name, 'src_protocol': src_protocol, 'dst_protocol': dst_protocol, - 'address_family': address_family, 'default_import_policy': default_import_policy, - } - if bgp_as is not None: data['as'] = bgp_as - return path, json.dumps(data) - -def interface( - name, index, description=None, if_type=None, vlan_id=None, mtu=None, ipv4_address_prefix=None, enabled=None -) -> Tuple[str, Dict]: - path = '/interface[{:s}]/subinterface[{:d}]'.format(name, index) - data = {'name': name, 'index': index} - if description is not None: data['description'] = description - if if_type is not None: data['type' ] = if_type - if vlan_id is not None: data['vlan_id' ] = vlan_id - if mtu is not None: data['mtu' ] = mtu - if enabled is not None: data['enabled' ] = enabled - if ipv4_address_prefix is not None: - ipv4_address, ipv4_prefix = ipv4_address_prefix - data['address_ip' ] = ipv4_address - data['address_prefix'] = ipv4_prefix - return path, json.dumps(data) - -def network_instance_interface(ni_name, ni_type, if_name, if_index) -> Tuple[str, Dict]: - path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, if_index) - data = {'name': ni_name, 'type': ni_type, 'id': if_name, 'interface': if_name, 'subinterface': if_index} - return path, json.dumps(data) - -def test_configure(drivers : Dict[str, OpenConfigDriver]): - #resources_to_get = [] - #resources_to_get = [RESOURCE_ENDPOINTS] - #resources_to_get = [RESOURCE_INTERFACES] - #resources_to_get = [RESOURCE_NETWORK_INSTANCES] - #resources_to_get = [RESOURCE_ROUTING_POLICIES] - #resources_to_get = [RESOURCE_SERVICES] - #LOGGER.info('resources_to_get = {:s}'.format(str(resources_to_get))) - #results_getconfig = driver.GetConfig(resources_to_get) - #LOGGER.info('results_getconfig = {:s}'.format(str(results_getconfig))) - - csgw1_resources_to_set = [ - network_instance('ecoc24', 'L3VRF', '192.168.150.1', '65001:1'), - 
network_instance_add_protocol_direct('ecoc24', 'L3VRF'), - network_instance_add_protocol_static('ecoc24', 'L3VRF'), - network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.1', '65001', neighbors=[ - ('192.168.150.2', '65001') - ]), - network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.10.1', 24), enabled=True), - - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.1', 24), enabled=True), - ] - LOGGER.info('CSGW1 resources_to_set = {:s}'.format(str(csgw1_resources_to_set))) - results_setconfig = drivers['CSGW1'].SetConfig(csgw1_resources_to_set) - LOGGER.info('CSGW1 results_setconfig = {:s}'.format(str(results_setconfig))) - - csgw2_resources_to_set = [ - network_instance('ecoc24', 'L3VRF', '192.168.150.2', '65001:1'), - network_instance_add_protocol_direct('ecoc24', 'L3VRF'), - network_instance_add_protocol_static('ecoc24', 'L3VRF'), - network_instance_add_protocol_bgp('ecoc24', 'L3VRF', '192.168.150.2', '65001', neighbors=[ - ('192.168.150.1', '65001') - ]), - network_instance_add_table_connection('ecoc24', 'DIRECTLY_CONNECTED', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - network_instance_add_table_connection('ecoc24', 'STATIC', 'BGP', 'IPV4', 'ACCEPT_ROUTE', bgp_as='65001'), - - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - interface('ce1', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.20.1', 24), enabled=True), - - 
interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - interface('xe5', 0, if_type='ethernetCsmacd', mtu=1500, ipv4_address_prefix=('192.168.150.2', 24), enabled=True), - ] - LOGGER.info('CSGW2 resources_to_set = {:s}'.format(str(csgw2_resources_to_set))) - results_setconfig = drivers['CSGW2'].SetConfig(csgw2_resources_to_set) - LOGGER.info('CSGW2 results_setconfig = {:s}'.format(str(results_setconfig))) - - csgw1_resources_to_delete = [ - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - #interface('ce1', 0), - #interface('xe5', 0), - network_instance('ecoc24', 'L3VRF'), - ] - LOGGER.info('CSGW1 resources_to_delete = {:s}'.format(str(csgw1_resources_to_delete))) - results_deleteconfig = drivers['CSGW1'].DeleteConfig(csgw1_resources_to_delete) - LOGGER.info('CSGW1 results_deleteconfig = {:s}'.format(str(results_deleteconfig))) - - csgw2_resources_to_delete = [ - network_instance_interface('ecoc24', 'L3VRF', 'ce1', 0), - network_instance_interface('ecoc24', 'L3VRF', 'xe5', 0), - #interface('ce1', 0), - #interface('xe5', 0), - network_instance('ecoc24', 'L3VRF'), - ] - LOGGER.info('CSGW2 resources_to_delete = {:s}'.format(str(csgw2_resources_to_delete))) - results_deleteconfig = drivers['CSGW2'].DeleteConfig(csgw2_resources_to_delete) - LOGGER.info('CSGW2 results_deleteconfig = {:s}'.format(str(results_deleteconfig))) - - - - - - -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import os -os.environ['DEVICE_EMULATED_ONLY'] = 'YES' - -# pylint: disable=wrong-import-position -import logging, pytest, time -from typing import Dict, List -from device.service.driver_api._Driver import ( - RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES, - RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES -) -from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver -from .storage.Storage import Storage -from .tools.manage_config import ( - check_config_endpoints, check_config_interfaces, check_config_network_instances, del_config, get_config, set_config -) -from .tools.check_updates import check_updates -from .tools.request_composers import ( - interface, network_instance, network_instance_interface, network_instance_static_route +from device.tests.gnmi_openconfig.tools.request_composers import ( + connection_point, connection_point_endpoint_local, connection_point_endpoint_remote, + interface, mpls_global, mpls_ldp_interface, network_instance, vlan, ) -logging.basicConfig(level=logging.DEBUG) +logging.basicConfig(level=logging.INFO) LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - - -##### DRIVER FIXTURE ################################################################################################### -DRIVER_SETTING_ADDRESS = '172.20.20.101' -DRIVER_SETTING_PORT = 6030 -DRIVER_SETTING_USERNAME = 'admin' -DRIVER_SETTING_PASSWORD = 'admin' -DRIVER_SETTING_USE_TLS = False - -@pytest.fixture(scope='session') -def driver() -> GnmiOpenConfigDriver: - _driver = GnmiOpenConfigDriver( 
- DRIVER_SETTING_ADDRESS, DRIVER_SETTING_PORT, - username=DRIVER_SETTING_USERNAME, - password=DRIVER_SETTING_PASSWORD, - use_tls=DRIVER_SETTING_USE_TLS, - ) - _driver.Connect() - yield _driver - time.sleep(1) - _driver.Disconnect() - - -##### STORAGE FIXTURE ################################################################################################## - -@pytest.fixture(scope='session') -def storage() -> Dict: - yield Storage() +# Skip unless the lab is explicitly enabled +RUN_LAB = os.environ.get('RUN_L2VPN_LAB', '0') == '1' +pytestmark = pytest.mark.skipif(not RUN_LAB, reason='Requires running ContainerLab L2VPN dataplane') +GNMI_PORT = 6030 +USERNAME = 'admin' +PASSWORD = 'admin' -##### NETWORK INSTANCE DETAILS ######################################################################################### +SERVICE_NAME = 'tfs-l2vpn-vpls' +VC_ID = 100 +VLAN_ID = 100 -NETWORK_INSTANCES = [ +ROUTERS = [ { - 'name': 'test-l3-svc', - 'type': 'L3VRF', - 'interfaces': [ - {'name': 'Ethernet1', 'index': 0, 'ipv4_addr': '192.168.1.1', 'ipv4_prefix': 24, 'enabled': True}, - {'name': 'Ethernet10', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True}, - ], - 'static_routes': [ - {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1}, - {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1}, - ] + 'name' : 'r1', + 'address' : '172.20.20.101', + 'ldp_router_id' : '172.20.20.101', + 'core_interface' : 'Ethernet2', + 'access_interface': 'Ethernet10', + 'peer' : '172.20.20.102', + }, + { + 'name' : 'r2', + 'address' : '172.20.20.102', + 'ldp_router_id' : '172.20.20.102', + 'core_interface' : 'Ethernet1', + 'access_interface': 'Ethernet10', + 'peer' : '172.20.20.101', }, - #{ - # 'name': 'test-l2-svc', - # 'type': 'L2VSI', - # 'interfaces': [ - # {'name': 'Ethernet2', 'index': 0, 'ipv4_addr': '192.168.1.1', 'ipv4_prefix': 24, 'enabled': True}, - # {'name': 'Ethernet4', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 
'enabled': True}, - # ], - # 'static_routes': [ - # {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1}, - # {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1}, - # ] - #} ] -##### TEST METHODS ##################################################################################################### - -def test_get_endpoints( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - results_getconfig = get_config(driver, [RESOURCE_ENDPOINTS]) - storage.endpoints.populate(results_getconfig) - check_config_endpoints(driver, storage) - - -def test_get_interfaces( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - results_getconfig = get_config(driver, [RESOURCE_INTERFACES]) - storage.interfaces.populate(results_getconfig) - check_config_interfaces(driver, storage) - - -def test_get_network_instances( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - results_getconfig = get_config(driver, [RESOURCE_NETWORK_INSTANCES]) - storage.network_instances.populate(results_getconfig) - check_config_network_instances(driver, storage) - - -def test_set_network_instances( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - ni_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - ni_type = ni['type'] - resources_to_set.append(network_instance(ni_name, ni_type)) - ni_names.append(ni_name) - storage.network_instances.network_instances.add(ni_name, {'type': ni_type}) - storage.network_instances.protocols.add(ni_name, 'DIRECTLY_CONNECTED') - 
storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV4') - storage.network_instances.tables.add(ni_name, 'DIRECTLY_CONNECTED', 'IPV6') - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/network_instance[{:s}]', ni_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_add_interfaces_to_network_instance( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - ni_if_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index'] - resources_to_set.append(network_instance_interface(ni_name, if_name, subif_index)) - ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index))) - storage.network_instances.interfaces.add(ni_name, if_name, subif_index) - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_set_interfaces( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - if_names = list() - for ni in NETWORK_INSTANCES: - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index' ] - ipv4_address = ni_if['ipv4_addr' ] - ipv4_prefix = 
ni_if['ipv4_prefix'] - enabled = ni_if['enabled' ] - resources_to_set.append(interface( - if_name, subif_index, ipv4_address, ipv4_prefix, enabled - )) - if_names.append(if_name) - storage.interfaces.ipv4_addresses.add(if_name, subif_index, ipv4_address, { - 'origin' : 'STATIC', 'prefix': ipv4_prefix - }) - default_vlan = storage.network_instances.vlans.get('default', 1) - default_vlan_members : List[str] = default_vlan.setdefault('members', list()) - if if_name in default_vlan_members: default_vlan_members.remove(if_name) - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/interface[{:s}]', if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_set_network_instance_static_routes( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_set = list() - ni_sr_prefixes = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_sr in ni.get('static_routes', list()): - ni_sr_prefix = ni_sr['prefix' ] - ni_sr_next_hop = ni_sr['next_hop'] - ni_sr_metric = ni_sr['metric' ] - ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.'))) - resources_to_set.append(network_instance_static_route( - ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric - )) - ni_sr_prefixes.append((ni_name, ni_sr_prefix)) - storage.network_instances.protocols.add(ni_name, 'STATIC') - storage.network_instances.protocol_static.add(ni_name, 'STATIC', ni_sr_prefix, { - 'prefix': ni_sr_prefix, 'next_hops': { - ni_sr_next_hop_index: {'next_hop': ni_sr_next_hop, 'metric': ni_sr_metric} - } - }) - storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV4') - 
storage.network_instances.tables.add(ni_name, 'STATIC', 'IPV6') - - results_setconfig = set_config(driver, resources_to_set) - check_updates(results_setconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_del_network_instance_static_routes( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - check_config_network_instances(driver, storage) - - resources_to_delete = list() - ni_sr_prefixes = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_sr in ni.get('static_routes', list()): - ni_sr_prefix = ni_sr['prefix' ] - ni_sr_next_hop = ni_sr['next_hop'] - ni_sr_metric = ni_sr['metric' ] - ni_sr_next_hop_index = 'AUTO_{:d}_{:s}'.format(ni_sr_metric, '-'.join(ni_sr_next_hop.split('.'))) - resources_to_delete.append(network_instance_static_route( - ni_name, ni_sr_prefix, ni_sr_next_hop_index, ni_sr_next_hop, metric=ni_sr_metric - )) - ni_sr_prefixes.append((ni_name, ni_sr_prefix)) - - storage.network_instances.protocols.remove(ni_name, 'STATIC') - storage.network_instances.protocol_static.remove(ni_name, 'STATIC', ni_sr_prefix) - storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV4') - storage.network_instances.tables.remove(ni_name, 'STATIC', 'IPV6') - - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/network_instance[{:s}]/static_route[{:s}]', ni_sr_prefixes) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_del_interfaces( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: 
disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - #check_config_network_instances(driver, storage) - - resources_to_delete = list() - if_names = list() - for ni in NETWORK_INSTANCES: - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index' ] - ipv4_address = ni_if['ipv4_addr' ] - ipv4_prefix = ni_if['ipv4_prefix'] - enabled = ni_if['enabled' ] - resources_to_delete.append(interface(if_name, subif_index, ipv4_address, ipv4_prefix, enabled)) - if_names.append(if_name) - storage.interfaces.ipv4_addresses.remove(if_name, subif_index, ipv4_address) - default_vlan = storage.network_instances.vlans.get('default', 1) - default_vlan_members : List[str] = default_vlan.setdefault('members', list()) - if if_name not in default_vlan_members: default_vlan_members.append(if_name) - - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/interface[{:s}]', if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) - - -def test_del_interfaces_from_network_instance( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - #check_config_network_instances(driver, storage) - - resources_to_delete = list() - ni_if_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - for ni_if in ni.get('interfaces', list()): - if_name = ni_if['name' ] - subif_index = ni_if['index'] - resources_to_delete.append(network_instance_interface(ni_name, if_name, subif_index)) - ni_if_names.append((ni_name, '{:s}.{:d}'.format(if_name, subif_index))) - storage.network_instances.interfaces.remove(ni_name, if_name, subif_index) - - results_deleteconfig = del_config(driver, resources_to_delete) - 
check_updates(results_deleteconfig, '/network_instance[{:s}]/interface[{:s}]', ni_if_names) - - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - #check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) +def _build_l2vpn_resources(router: Dict[str, str]) -> Tuple[List[Tuple[str, Dict]], List[Tuple[str, Dict]]]: + set_resources : List[Tuple[str, Dict]] = [ + network_instance(SERVICE_NAME, 'L2VSI'), + connection_point(SERVICE_NAME, 'access'), + connection_point_endpoint_local( + SERVICE_NAME, 'access', 'access-ep', router['access_interface'], subif=0, precedence=0 + ), + connection_point(SERVICE_NAME, 'core'), + connection_point_endpoint_remote( + SERVICE_NAME, 'core', 'core-ep', router['peer'], vc_id=VC_ID, precedence=100 + ), + ] + del_resources = list(reversed(set_resources)) + return set_resources, del_resources + +def _set_with_retry(driver: GnmiOpenConfigDriver, resources: List[Tuple[str, Dict]], attempts: int = 5, wait_s: int = 5): + """Retry SetConfig while the device reports it is not yet initialized.""" + last_exc = None + for i in range(attempts): + try: + return driver.SetConfig(resources) + except grpc.RpcError as exc: + last_exc = exc + if exc.code() == grpc.StatusCode.UNAVAILABLE and 'system not yet initialized' in exc.details(): + LOGGER.info('Device not ready (attempt %s/%s), waiting %ss', i + 1, attempts, wait_s) + time.sleep(wait_s) + continue + raise + if last_exc: + raise last_exc + return [] -def test_del_network_instances( - driver : GnmiOpenConfigDriver, # pylint: disable=redefined-outer-name - storage : Storage, # pylint: disable=redefined-outer-name -) -> None: - check_config_interfaces(driver, storage) - #check_config_network_instances(driver, storage) - resources_to_delete = list() - ni_names = list() - for ni in NETWORK_INSTANCES: - ni_name = ni['name'] - ni_type = ni['type'] - resources_to_delete.append(network_instance(ni_name, ni_type)) - ni_names.append(ni_name) - 
storage.network_instances.network_instances.remove(ni_name) - storage.network_instances.protocols.remove(ni_name, 'DIRECTLY_CONNECTED') - storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV4') - storage.network_instances.tables.remove(ni_name, 'DIRECTLY_CONNECTED', 'IPV6') +@pytest.fixture(scope='session') +def drivers() -> Dict[str, GnmiOpenConfigDriver]: + _drivers : Dict[str, GnmiOpenConfigDriver] = dict() + for router in ROUTERS: + driver = GnmiOpenConfigDriver( + router['address'], GNMI_PORT, username=USERNAME, password=PASSWORD, use_tls=False + ) + try: + driver.Connect() + except Exception as exc: # pylint: disable=broad-except + pytest.skip(f"Cannot connect to {router['name']} ({router['address']}): {exc}") + _drivers[router['name']] = driver + yield _drivers + time.sleep(1) + for _, driver in _drivers.items(): + driver.Disconnect() - results_deleteconfig = del_config(driver, resources_to_delete) - check_updates(results_deleteconfig, '/network_instance[{:s}]', ni_names) - check_config_interfaces(driver, storage, max_retries=10, retry_delay=2.0) - check_config_network_instances(driver, storage, max_retries=10, retry_delay=2.0) +def test_configure_mpls_ldp(drivers: Dict[str, GnmiOpenConfigDriver]) -> None: + """Enable LDP globally and on the r1<->r2 core links.""" + for router in ROUTERS: + driver = drivers[router['name']] + resources = [ + mpls_global(router['ldp_router_id'], hello_interval=5, hello_holdtime=15), + mpls_ldp_interface(router['core_interface'], hello_interval=5, hello_holdtime=15), + ] + LOGGER.info('Configuring MPLS/LDP on %s (%s)', router['name'], router['address']) + results = _set_with_retry(driver, resources) + LOGGER.info('MPLS/LDP result: %s', results) + assert all( + (result is True) or (isinstance(result, tuple) and len(result) > 1 and result[1] is True) + for result in results + ) + + +def test_configure_l2vpn_vpls(drivers: Dict[str, GnmiOpenConfigDriver]) -> None: + """Fallback validation: create a VLAN in 
default VRF and attach core/access interfaces.""" + for router in ROUTERS: + driver = drivers[router['name']] + vlan_res = vlan('default', VLAN_ID, vlan_name='tfs-vlan') + if_access = interface(router['access_interface'], VLAN_ID, enabled=True, vlan_id=VLAN_ID, + ipv4_address=None, ipv4_prefix=None) + if_core = interface(router['core_interface'], VLAN_ID, enabled=True, vlan_id=VLAN_ID, + ipv4_address=None, ipv4_prefix=None) + + LOGGER.info('Configuring VLAN %s on %s (%s)', VLAN_ID, router['name'], router['address']) + results_vlan = _set_with_retry(driver, [vlan_res, if_access, if_core]) + LOGGER.info('VLAN result: %s', results_vlan) + assert all( + (result is True) or (isinstance(result, tuple) and len(result) > 1 and result[1] is True) + for result in results_vlan + ) + + LOGGER.info('Tearing down VLAN %s on %s (%s)', VLAN_ID, router['name'], router['address']) + results_del = driver.DeleteConfig([if_core, if_access, vlan_res]) + assert all( + (result is True) or (isinstance(result, tuple) and len(result) > 1 and result[1] is True) + for result in results_del + ) diff --git a/src/device/tests/gnmi_openconfig/tools/request_composers.py b/src/device/tests/gnmi_openconfig/tools/request_composers.py index 0a8aefe24906011a8e329460c1c8d964a8097091..539f71ee63102f6090c48b674dcb9dd4204851ec 100644 --- a/src/device/tests/gnmi_openconfig/tools/request_composers.py +++ b/src/device/tests/gnmi_openconfig/tools/request_composers.py @@ -14,11 +14,20 @@ from typing import Dict, Tuple -def interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]: +def interface(if_name, sif_index, ipv4_address=None, ipv4_prefix=None, enabled=True, vlan_id=None) -> Tuple[str, Dict]: str_path = '/interface[{:s}]'.format(if_name) str_data = { - 'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index, 'sub_if_enabled': enabled, - 'sub_if_ipv4_enabled': enabled, 'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix + 'name': if_name, + 'enabled': 
enabled, + 'index': sif_index, + 'sub_if_index': sif_index, + 'sub_if_enabled': enabled, + 'sub_if_ipv4_enabled': enabled, + 'sub_if_ipv4_address': ipv4_address, + 'sub_if_ipv4_prefix': ipv4_prefix, + 'address_ip': ipv4_address, + 'address_prefix': ipv4_prefix, + 'vlan_id': vlan_id, } return str_path, str_data @@ -42,3 +51,70 @@ def network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]: 'name': ni_name, 'if_name': if_name, 'sif_index': sif_index } return str_path, str_data + +def mpls_global(ldp_router_id: str, hello_interval: int = None, hello_holdtime: int = None) -> Tuple[str, Dict]: + str_path = '/mpls' + str_data = { + 'ldp': { + 'lsr_id': ldp_router_id, + 'hello_interval': hello_interval, + 'hello_holdtime': hello_holdtime, + } + } + return str_path, str_data + +def mpls_ldp_interface(if_name: str, hello_interval: int = None, hello_holdtime: int = None) -> Tuple[str, Dict]: + str_path = '/mpls/interface[{:s}]'.format(if_name) + str_data = { + 'interface': if_name, + 'hello_interval': hello_interval, + 'hello_holdtime': hello_holdtime, + } + return str_path, str_data + +def connection_point(ni_name: str, cp_id: str) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/connection_point[{:s}]'.format(ni_name, cp_id) + str_data = {'name': ni_name, 'connection_point_id': cp_id} + return str_path, str_data + +def connection_point_endpoint_local( + ni_name: str, cp_id: str, ep_id: str, if_name: str, subif: int = 0, precedence: int = 0, site_id: int = None +) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/connection_point[{:s}]/endpoint[{:s}]'.format(ni_name, cp_id, ep_id) + str_data = { + 'name': ni_name, + 'connection_point_id': cp_id, + 'endpoint_id': ep_id, + 'type': 'LOCAL', + 'precedence': precedence, + 'interface': if_name, + 'subinterface': subif, + 'site_id': site_id, + } + return str_path, str_data + +def connection_point_endpoint_remote( + ni_name: str, cp_id: str, ep_id: str, remote_system: str, vc_id: int, + 
precedence: int = 0, site_id: int = None +) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/connection_point[{:s}]/endpoint[{:s}]'.format(ni_name, cp_id, ep_id) + str_data = { + 'name': ni_name, + 'connection_point_id': cp_id, + 'endpoint_id': ep_id, + 'type': 'REMOTE', + 'precedence': precedence, + 'remote_system': remote_system, + 'virtual_circuit_id': vc_id, + 'site_id': site_id, + } + return str_path, str_data + +def vlan(ni_name: str, vlan_id: int, vlan_name: str = None) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/vlan[{:d}]'.format(ni_name, vlan_id) + str_data = { + 'name': ni_name, + 'vlan_id': vlan_id, + 'vlan_name': vlan_name, + } + return str_path, str_data diff --git a/src/nbi/service/ietf_l2vpn/Handlers.py b/src/nbi/service/ietf_l2vpn/Handlers.py index 775c0aab05222999898274cdde08cf036fd86edd..1b8fc2b93dfc7d724e26954ea18f9e263a85cf3a 100644 --- a/src/nbi/service/ietf_l2vpn/Handlers.py +++ b/src/nbi/service/ietf_l2vpn/Handlers.py @@ -35,13 +35,15 @@ from .Constants import ( LOGGER = logging.getLogger(__name__) def create_service( - service_uuid : str, context_uuid : Optional[str] = DEFAULT_CONTEXT_NAME + service_uuid : str, + service_type : ServiceTypeEnum = ServiceTypeEnum.SERVICETYPE_L2NM, + context_uuid : Optional[str] = DEFAULT_CONTEXT_NAME, ) -> Optional[Exception]: # pylint: disable=no-member service_request = Service() service_request.service_id.context_id.context_uuid.uuid = context_uuid service_request.service_id.service_uuid.uuid = service_uuid - service_request.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + service_request.service_type = service_type service_request.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED try: @@ -56,7 +58,12 @@ def process_vpn_service( vpn_service : Dict, errors : List[Dict] ) -> None: vpn_id = vpn_service['vpn-id'] - exc = create_service(vpn_id) + customer_name = vpn_service.get('customer-name') + if isinstance(customer_name, str) and customer_name.strip().lower() 
== 'osm': + service_type = ServiceTypeEnum.SERVICETYPE_L3NM + else: + service_type = ServiceTypeEnum.SERVICETYPE_L2NM + exc = create_service(vpn_id, service_type=service_type) if exc is not None: errors.append({'error': str(exc)}) @@ -64,37 +71,98 @@ def process_site_network_access( site_id : str, network_access : Dict, errors : List[Dict] ) -> None: try: - site_network_access_type = network_access['site-network-access-type'] - site_network_access_type = site_network_access_type.replace('ietf-l2vpn-svc:', '') - if site_network_access_type != 'multipoint': - MSG = 'Site Network Access Type: {:s}' - msg = MSG.format(str(network_access['site-network-access-type'])) - raise NotImplementedError(msg) + #device_uuid = None + #endpoint_uuid = None + #if 'device-reference' in network_access: + # device_uuid = network_access['device-reference'] + # endpoint_uuid = network_access['network-access-id'] + + bearer_reference = None + if 'bearer' in network_access: + network_access_bearer = network_access['bearer'] + if 'bearer-reference' in network_access_bearer: + bearer_reference = network_access_bearer['bearer-reference'] + + bearer_mapping = BEARER_MAPPINGS.get(bearer_reference) + if bearer_mapping is None: + if ':' in bearer_reference: + bearer_mapping = str(bearer_reference).split(':', maxsplit=1) + bearer_mapping.extend([None, None, None, None, None, None, None]) + bearer_mapping = tuple(bearer_mapping) + MSG = 'Bearer({:s}) not found; auto-generated mapping: {:s}' + LOGGER.warning(MSG.format(str(bearer_reference), str(bearer_mapping))) + else: + MSG = 'Bearer({:s}) not found; unable to auto-generated mapping' + raise Exception(MSG.format(str(bearer_reference))) - access_role : str = network_access['vpn-attachment']['site-role'] - access_role = access_role.replace('ietf-l2vpn-svc:', '').replace('-role', '') # hub/spoke - if access_role not in {'hub', 'spoke'}: - MSG = 'Site VPN Attackment Role: {:s}' - raise 
NotImplementedError(MSG.format(str(network_access['site-network-access-type']))) + ( + device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index, + address_ip, address_prefix, remote_router, circuit_id + ) = bearer_mapping - device_uuid = network_access['device-reference'] - endpoint_uuid = network_access['site-network-access-id'] service_uuid = network_access['vpn-attachment']['vpn-id'] - encapsulation_type = network_access['connection']['encapsulation-type'] - cvlan_tag_id = network_access['connection']['tagged-interface'][encapsulation_type]['cvlan-id'] - - bearer_reference = network_access['bearer']['bearer-reference'] - - service_mtu = network_access['service']['svc-mtu'] - service_input_bandwidth = network_access['service']['svc-input-bandwidth'] - service_output_bandwidth = network_access['service']['svc-output-bandwidth'] - service_bandwidth_bps = max(service_input_bandwidth, service_output_bandwidth) - service_bandwidth_gbps = service_bandwidth_bps / 1.e9 - + network_access_connection = network_access['connection'] + encapsulation_type = network_access_connection['encapsulation-type'] + encapsulation_type = encapsulation_type.replace('ietf-l2vpn-svc:', '') + if encapsulation_type != 'vlan': + encapsulation_type = network_access_connection['encapsulation-type'] + MSG = 'EncapsulationType({:s}) not supported' + raise NotImplementedError(MSG.format(str(encapsulation_type))) + + cvlan_tag_id = None + if 'tagged-interface' in network_access_connection: + nac_tagged_if = network_access_connection['tagged-interface'] + nac_tagged_if_type = nac_tagged_if.get('type', 'priority-tagged') + nac_tagged_if_type = nac_tagged_if_type.replace('ietf-l2vpn-svc:', '') + if nac_tagged_if_type == 'dot1q': + encapsulation_data = nac_tagged_if['dot1q-vlan-tagged'] + tag_type = encapsulation_data.get('tg-type', 'c-vlan') + tag_type = tag_type.replace('ietf-l2vpn-svc:', '') + if tag_type == 'c-vlan': + cvlan_tag_id = encapsulation_data['cvlan-id'] + else: + tag_type = 
encapsulation_data.get('tg-type', 'c-vlan') + MSG = 'TagType({:s}) not supported' + raise NotImplementedError(MSG.format(str(tag_type))) + else: + nac_tagged_if_type = nac_tagged_if.get('type', 'priority-tagged') + MSG = 'TaggedInterfaceType({:s}) not supported' + raise NotImplementedError(MSG.format(str(nac_tagged_if_type))) + + network_access_service = network_access.get('service', dict()) + + service_mtu = network_access_service.get('svc-mtu', DEFAULT_MTU) + + max_bandwidth_gbps = None max_e2e_latency_ms = None availability = None - for qos_profile_class in network_access['service']['qos']['qos-profile']['classes']['class']: + + service_bandwidth_bps = 0 + service_input_bandwidth = network_access_service.get('svc-input-bandwidth') + if service_input_bandwidth is not None: + service_input_bandwidth = float(service_input_bandwidth) + service_bandwidth_bps = max(service_bandwidth_bps, service_input_bandwidth) + + service_output_bandwidth = network_access_service.get('svc-output-bandwidth') + if service_output_bandwidth is not None: + service_output_bandwidth = float(service_output_bandwidth) + if service_bandwidth_bps is None: + service_bandwidth_bps = service_output_bandwidth + else: + service_bandwidth_bps = max(service_bandwidth_bps, service_output_bandwidth) + + if service_bandwidth_bps > 1.e-12: + max_bandwidth_gbps = service_bandwidth_bps / 1.e9 + + qos_profile_classes = ( + network_access.get('service', dict()) + .get('qos', dict()) + .get('qos-profile', dict()) + .get('classes', dict()) + .get('class', list()) + ) + for qos_profile_class in qos_profile_classes: if qos_profile_class['class-id'] != 'qos-realtime': MSG = 'Site Network Access QoS Class Id: {:s}' raise NotImplementedError(MSG.format(str(qos_profile_class['class-id']))) @@ -105,8 +173,8 @@ def process_site_network_access( MSG = 'Site Network Access QoS Class Direction: {:s}' raise NotImplementedError(MSG.format(str(qos_profile_class['direction']))) - max_e2e_latency_ms = 
qos_profile_class['latency']['latency-boundary'] - availability = qos_profile_class['bandwidth']['guaranteed-bw-percent'] + max_e2e_latency_ms = float(qos_profile_class['latency']['latency-boundary']) + availability = float(qos_profile_class['bandwidth']['guaranteed-bw-percent']) network_access_diversity = network_access.get('access-diversity', {}) diversity_constraints = network_access_diversity.get('constraints', {}).get('constraint', []) @@ -125,15 +193,6 @@ def process_site_network_access( single_active : bool = len(network_access_availability.get('single-active', [])) > 0 all_active : bool = len(network_access_availability.get('all-active', [])) > 0 - mapping = BEARER_MAPPINGS.get(bearer_reference) - if mapping is None: - msg = 'Specified Bearer({:s}) is not configured.' - raise Exception(msg.format(str(bearer_reference))) - ( - device_uuid, endpoint_uuid, router_id, route_dist, sub_if_index, - address_ip, address_prefix, remote_router, circuit_id - ) = mapping - context_client = ContextClient() service = get_service_by_uuid( context_client, service_uuid, context_uuid=DEFAULT_CONTEXT_NAME, rw_copy=True @@ -150,8 +209,8 @@ def process_site_network_access( update_constraint_endpoint_location(constraints, endpoint_id, region=site_id) if access_priority is not None: update_constraint_endpoint_priority(constraints, endpoint_id, access_priority) - if service_bandwidth_gbps is not None: - update_constraint_sla_capacity(constraints, service_bandwidth_gbps) + if max_bandwidth_gbps is not None: + update_constraint_sla_capacity(constraints, max_bandwidth_gbps) if max_e2e_latency_ms is not None: update_constraint_sla_latency(constraints, max_e2e_latency_ms) if availability is not None: @@ -171,13 +230,14 @@ def process_site_network_access( update_constraint_sla_availability(constraints, num_disjoint_paths, all_active, 0.0) service_settings_key = '/settings' - if service_mtu is None: service_mtu = DEFAULT_MTU - update_config_rule_custom(config_rules, service_settings_key, 
{ + field_updates = { 'mtu' : (service_mtu, True), #'address_families': (DEFAULT_ADDRESS_FAMILIES, True), #'bgp_as' : (DEFAULT_BGP_AS, True), #'bgp_route_target': (DEFAULT_BGP_ROUTE_TARGET, True), - }) + } + if cvlan_tag_id is not None: field_updates['vlan_id' ] = (cvlan_tag_id, True) + update_config_rule_custom(config_rules, service_settings_key, field_updates) #ENDPOINT_SETTINGS_KEY = '/device[{:s}]/endpoint[{:s}]/vlan[{:d}]/settings' #endpoint_settings_key = ENDPOINT_SETTINGS_KEY.format(device_uuid, endpoint_uuid, cvlan_tag_id) @@ -204,9 +264,9 @@ def process_site_network_access( def process_site(site : Dict, errors : List[Dict]) -> None: site_id = site['site-id'] - # this change is made for ECOC2025 demo purposes - if site['management']['type'] != 'provider-managed': - # if site['management']['type'] == 'customer-managed': + site_management_type = site['management']['type'] + site_management_type = site_management_type.replace('ietf-l2vpn-svc:', '') + if site_management_type != 'provider-managed': MSG = 'Site Management Type: {:s}' raise NotImplementedError(MSG.format(str(site['management']['type']))) @@ -215,7 +275,9 @@ def process_site(site : Dict, errors : List[Dict]) -> None: process_site_network_access(site_id, network_access, errors) def update_vpn(site : Dict, errors : List[Dict]) -> None: - if site['management']['type'] != 'provider-managed': + site_management_type = site['management']['type'] + site_management_type = site_management_type.replace('ietf-l2vpn-svc:', '') + if site_management_type != 'provider-managed': MSG = 'Site Management Type: {:s}' raise NotImplementedError(MSG.format(str(site['management']['type']))) @@ -237,7 +299,7 @@ def update_site_network_access(network_access : Dict, errors : List[Dict]) -> No service_input_bandwidth = network_access['service']['svc-input-bandwidth'] service_output_bandwidth = network_access['service']['svc-output-bandwidth'] service_bandwidth_bps = max(service_input_bandwidth, service_output_bandwidth) - 
service_bandwidth_gbps = service_bandwidth_bps / 1.e9 + max_bandwidth_gbps = service_bandwidth_bps / 1.e9 max_e2e_latency_ms = None availability = None @@ -251,8 +313,8 @@ def update_site_network_access(network_access : Dict, errors : List[Dict]) -> No raise Exception(MSG.format(str(service_uuid))) constraints = service.service_constraints - if service_bandwidth_gbps is not None: - update_constraint_sla_capacity(constraints, service_bandwidth_gbps) + if max_bandwidth_gbps is not None: + update_constraint_sla_capacity(constraints, max_bandwidth_gbps) if max_e2e_latency_ms is not None: update_constraint_sla_latency(constraints, max_e2e_latency_ms) if availability is not None: diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py index 070a548b50c962fe1e8c0d0646f608658e435ca9..31354ae325d56a27e60c4209c850ea62509ad486 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Service.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Service.py @@ -44,7 +44,10 @@ class L2VPN_Service(Resource): if target is None: raise Exception('VPN({:s}) not found in database'.format(str(vpn_id))) - if target.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: + if target.service_type not in ( + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L3NM, + ): raise Exception('VPN({:s}) is not L2VPN'.format(str(vpn_id))) service_ids = {target.service_id.service_uuid.uuid, target.name} # pylint: disable=no-member @@ -72,7 +75,10 @@ class L2VPN_Service(Resource): target = get_service_by_uuid(context_client, vpn_id) if target is None: LOGGER.warning('VPN({:s}) not found in database. 
Nothing done.'.format(str(vpn_id))) - elif target.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: + elif target.service_type not in ( + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L3NM, + ): raise Exception('VPN({:s}) is not L2VPN'.format(str(vpn_id))) else: service_ids = {target.service_id.service_uuid.uuid, target.name} # pylint: disable=no-member @@ -106,6 +112,11 @@ class L2VPN_Service(Resource): else: errors.append('Unexpected request format: {:s}'.format(str(request_data))) + if len(errors) > 0: + LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) + response = jsonify(errors) response.status_code = HTTP_NOCONTENT if len(errors) == 0 else HTTP_SERVERERROR return response diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py index ccdad5c547c9e564cd96e54ad524b5c0a12f70ec..a43a78725aef1191f9b3a90a1b0e2b2bda2204bd 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_Services.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_Services.py @@ -75,6 +75,8 @@ class L2VPN_Services(Resource): if len(errors) > 0: LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) response = jsonify(errors) response.status_code = HTTP_CREATED if len(errors) == 0 else HTTP_SERVERERROR diff --git a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py index eb0f246e6f3bd8bcf7278c3d4eb43c6da24ed73a..e11beb62c9cd406bda957f3738b387ab5321b0b6 100644 --- a/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py +++ b/src/nbi/service/ietf_l2vpn/L2VPN_SiteNetworkAccesses.py @@ -38,6 +38,10 @@ class L2VPN_SiteNetworkAccesses(Resource): LOGGER.debug('Site_Id: {:s}'.format(str(site_id))) LOGGER.debug('Request: {:s}'.format(str(request_data))) errors = self._process_site_network_accesses(site_id, request_data) + if len(errors) > 0: + LOGGER.error('Errors: 
{:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) response = jsonify(errors) response.status_code = HTTP_CREATED if len(errors) == 0 else HTTP_SERVERERROR return response @@ -49,6 +53,10 @@ class L2VPN_SiteNetworkAccesses(Resource): LOGGER.debug('Site_Id: {:s}'.format(str(site_id))) LOGGER.debug('Request: {:s}'.format(str(request_data))) errors = self._process_site_network_accesses(site_id, request_data) + if len(errors) > 0: + LOGGER.error('Errors: {:s}'.format(str(errors))) + else: + LOGGER.debug('Errors: {:s}'.format(str(errors))) response = jsonify(errors) response.status_code = HTTP_NOCONTENT if len(errors) == 0 else HTTP_SERVERERROR return response @@ -80,12 +88,25 @@ class L2VPN_SiteNetworkAccesses(Resource): location_refs = set() location_refs.add('fake-location') + device_refs = dict() + device_refs['fake-device'] = 'fake-location' + # Add mandatory fields OSM RO driver skips and fix wrong ones for site_network_access in site_network_accesses: + location = 'fake-location' if 'location-reference' in site_network_access: - location_refs.add(site_network_access['location-reference']) + location = site_network_access['location-reference'] + site_network_access.pop('location-reference') + #else: + # site_network_access['location-reference'] = location + location_refs.add(location) + + if 'device-reference' in site_network_access: + device = site_network_access['device-reference'] else: - site_network_access['location-reference'] = 'fake-location' + device = 'fake-device' + site_network_access['device-reference'] = device + device_refs[device] = location if 'connection' in site_network_access: connection = site_network_access['connection'] @@ -115,9 +136,14 @@ class L2VPN_SiteNetworkAccesses(Resource): context_client = ContextClient() vpn_services = list() for service in get_services(context_client): - if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: continue - - vpn_ids = [service.service_id.service_uuid.uuid, 
service.name] + if service.service_type not in ( + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L3NM, + ): + continue + + # De-duplicate services uuid/names in case service_uuid == service_name + vpn_ids = {service.service_id.service_uuid.uuid, service.name} for vpn_id in vpn_ids: vpn_services.append({ 'vpn-id': vpn_id, @@ -128,6 +154,9 @@ class L2VPN_SiteNetworkAccesses(Resource): 'ce-vlan-cos-preservation': True, }) + MSG = '[_prepare_request_payload] vpn_services={:s}' + LOGGER.debug(MSG.format(str(vpn_services))) + request_data = {'ietf-l2vpn-svc:l2vpn-svc': { 'vpn-services': { 'vpn-service': vpn_services @@ -135,16 +164,23 @@ class L2VPN_SiteNetworkAccesses(Resource): 'sites': {'site': [{ 'site-id': site_id, 'default-ce-vlan-id': 1, - 'management': {'type': 'customer-managed'}, + 'management': {'type': 'provider-managed'}, 'locations': {'location': [ {'location-id': location_ref} for location_ref in location_refs ]}, + 'devices': {'device': [ + {'device-id': device_ref, 'location': location_ref} + for device_ref, location_ref in device_refs.items() + ]}, 'site-network-accesses': { 'site-network-access': site_network_accesses } }]} }} + + MSG = '[_prepare_request_payload] request_data={:s}' + LOGGER.warning(MSG.format(str(request_data))) return request_data errors.append('Unexpected request: {:s}'.format(str(request_data))) @@ -159,11 +195,17 @@ class L2VPN_SiteNetworkAccesses(Resource): request_data = yang_validator.parse_to_dict(request_data) yang_validator.destroy() - site_network_accesses = ( - request_data.get('site-network-accesses', dict()) - .get('site-network-access', list()) + sites = ( + request_data.get('l2vpn-svc', dict()) + .get('sites', dict()) + .get('site', list()) ) - for site_network_access in site_network_accesses: - process_site_network_access(site_id, site_network_access, errors) + for site in sites: + site_network_accesses = ( + site.get('site-network-accesses', dict()) + .get('site-network-access', list()) + ) + for 
site_network_access in site_network_accesses: + process_site_network_access(site_id, site_network_access, errors) return errors diff --git a/src/service/service/service_handler_api/Exceptions.py b/src/service/service/service_handler_api/Exceptions.py index 7a10ff3343770329f1f983b739078b9cffe724aa..bc6ac4c8fcc05e79fdd1bacf638738736460b16d 100644 --- a/src/service/service/service_handler_api/Exceptions.py +++ b/src/service/service/service_handler_api/Exceptions.py @@ -13,17 +13,32 @@ # limitations under the License. class UnsatisfiedFilterException(Exception): - def __init__(self, filter_fields): + def __init__(self, filter_fields) -> None: msg = 'No ServiceHandler satisfies FilterFields({:s})' super().__init__(msg.format(str(filter_fields))) +class AmbiguousFilterException(Exception): + def __init__(self, filter_fields, compatible_service_handlers) -> None: + msg = 'Multiple Service Handlers satisfy FilterFields({:s}): {:s}' + super().__init__(msg.format(str(filter_fields), str(compatible_service_handlers))) + class UnsupportedServiceHandlerClassException(Exception): - def __init__(self, service_handler_class_name): + def __init__(self, service_handler_class_name) -> None: msg = 'Class({:s}) is not a subclass of _ServiceHandler' super().__init__(msg.format(str(service_handler_class_name))) +class EmptyFilterFieldException(Exception): + def __init__(self, filter_fields, service_handler_class_name=None) -> None: + if service_handler_class_name: + msg = 'Empty FilterField({:s}) specified by ServiceHandler({:s}) is not supported' + msg = msg.format(str(filter_fields), str(service_handler_class_name)) + else: + msg = 'Empty FilterField({:s}) is not supported' + msg = msg.format(str(filter_fields)) + super().__init__(msg) + class UnsupportedFilterFieldException(Exception): - def __init__(self, unsupported_filter_fields, service_handler_class_name=None): + def __init__(self, unsupported_filter_fields, service_handler_class_name=None) -> None: if service_handler_class_name: 
msg = 'FilterFields({:s}) specified by ServiceHandler({:s}) are not supported' msg = msg.format(str(unsupported_filter_fields), str(service_handler_class_name)) @@ -34,8 +49,8 @@ class UnsupportedFilterFieldException(Exception): class UnsupportedFilterFieldValueException(Exception): def __init__( - self, filter_field_name, filter_field_value, allowed_filter_field_values, service_handler_class_name=None): - + self, filter_field_name, filter_field_value, allowed_filter_field_values, service_handler_class_name=None + ) -> None: if service_handler_class_name: msg = 'FilterField({:s}={:s}) specified by ServiceHandler({:s}) is not supported. Allowed values are {:s}' msg = msg.format( @@ -47,20 +62,19 @@ class UnsupportedFilterFieldValueException(Exception): super().__init__(msg) #class UnsupportedResourceKeyException(Exception): -# def __init__(self, resource_key): +# def __init__(self, resource_key) -> None: # msg = 'ResourceKey({:s}) not supported' # msg = msg.format(str(resource_key)) # super().__init__(msg) -# + #class ConfigFieldNotFoundException(Exception): -# def __init__(self, config_field_name): +# def __init__(self, config_field_name) -> None: # msg = 'ConfigField({:s}) not specified in resource' # msg = msg.format(str(config_field_name)) # super().__init__(msg) -# + #class ConfigFieldsNotSupportedException(Exception): -# def __init__(self, config_fields): +# def __init__(self, config_fields) -> None: # msg = 'ConfigFields({:s}) not supported in resource' # msg = msg.format(str(config_fields)) # super().__init__(msg) -# \ No newline at end of file diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 473efa3e0f3fc1a0845974fcf4f58d8dfd65127d..b0a5666a60663b5949445b6dbb10c922981004a6 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -13,56 +13,26 @@ # limitations under the License. 
from enum import Enum -from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum +from typing import Any, Dict, Optional +from common.proto.context_pb2 import Device, DeviceDriverEnum, Service, ServiceTypeEnum class FilterFieldEnum(Enum): SERVICE_TYPE = 'service_type' DEVICE_DRIVER = 'device_driver' -SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_UNKNOWN, - ServiceTypeEnum.SERVICETYPE_L3NM, - ServiceTypeEnum.SERVICETYPE_L2NM, - ServiceTypeEnum.SERVICETYPE_L1NM, - ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, - ServiceTypeEnum.SERVICETYPE_TE, - ServiceTypeEnum.SERVICETYPE_E2E, - ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, - ServiceTypeEnum.SERVICETYPE_QKD, - ServiceTypeEnum.SERVICETYPE_INT, - ServiceTypeEnum.SERVICETYPE_ACL, - ServiceTypeEnum.SERVICETYPE_IP_LINK, - ServiceTypeEnum.SERVICETYPE_IPOWDM, - ServiceTypeEnum.SERVICETYPE_TAPI_LSP, -} - -DEVICE_DRIVER_VALUES = { - DeviceDriverEnum.DEVICEDRIVER_UNDEFINED, - DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, - DeviceDriverEnum.DEVICEDRIVER_TRANSPORT_API, - DeviceDriverEnum.DEVICEDRIVER_P4, - DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, - DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532, - DeviceDriverEnum.DEVICEDRIVER_XR, - DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN, - DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, - DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS, - DeviceDriverEnum.DEVICEDRIVER_IETF_ACTN, - DeviceDriverEnum.DEVICEDRIVER_OC, - DeviceDriverEnum.DEVICEDRIVER_QKD, - DeviceDriverEnum.DEVICEDRIVER_IETF_L3VPN, - DeviceDriverEnum.DEVICEDRIVER_IETF_SLICE, - DeviceDriverEnum.DEVICEDRIVER_NCE, - DeviceDriverEnum.DEVICEDRIVER_SMARTNIC, - DeviceDriverEnum.DEVICEDRIVER_MORPHEUS, - DeviceDriverEnum.DEVICEDRIVER_RYU, - DeviceDriverEnum.DEVICEDRIVER_GNMI_NOKIA_SRLINUX, - DeviceDriverEnum.DEVICEDRIVER_OPENROADM, - DeviceDriverEnum.DEVICEDRIVER_RESTCONF_OPENCONFIG, -} - -# Map allowed filter fields to allowed values per Filter field. 
If no restriction (free text) None is specified +# Map allowed filter fields to allowed values per Filter field. +# If no restriction (free text) None is specified FILTER_FIELD_ALLOWED_VALUES = { - FilterFieldEnum.SERVICE_TYPE.value : SERVICE_TYPE_VALUES, - FilterFieldEnum.DEVICE_DRIVER.value : DEVICE_DRIVER_VALUES, + FilterFieldEnum.SERVICE_TYPE.value : set(ServiceTypeEnum.values()), + FilterFieldEnum.DEVICE_DRIVER.value : set(DeviceDriverEnum.values()), } + +def get_service_handler_filter_fields( + service : Optional[Service], device : Optional[Device] +) -> Dict[FilterFieldEnum, Any]: + if service is None: return {} + if device is None: return {} + return { + FilterFieldEnum.SERVICE_TYPE : service.service_type, + FilterFieldEnum.DEVICE_DRIVER : [driver for driver in device.device_drivers], + } diff --git a/src/service/service/service_handler_api/ServiceHandlerFactory.py b/src/service/service/service_handler_api/ServiceHandlerFactory.py index f998fe07246be834ac154105e836713fb4ef29ae..efd636ddd0d39c10b9e3e0995d4b721aeac81639 100644 --- a/src/service/service/service_handler_api/ServiceHandlerFactory.py +++ b/src/service/service/service_handler_api/ServiceHandlerFactory.py @@ -12,93 +12,122 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import logging, operator +import logging from enum import Enum -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Type from common.proto.context_pb2 import Device, DeviceDriverEnum, Service from common.tools.grpc.Tools import grpc_message_to_json_string from .Exceptions import ( - UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, UnsupportedFilterFieldException, - UnsupportedFilterFieldValueException) + AmbiguousFilterException, EmptyFilterFieldException, + UnsatisfiedFilterException, UnsupportedServiceHandlerClassException, + UnsupportedFilterFieldException, UnsupportedFilterFieldValueException +) from .FilterFields import FILTER_FIELD_ALLOWED_VALUES, FilterFieldEnum if TYPE_CHECKING: - from service.service.service_handler_api._ServiceHandler import _ServiceHandler + from ._ServiceHandler import _ServiceHandler + LOGGER = logging.getLogger(__name__) +SUPPORTED_FILTER_FIELDS = set(FILTER_FIELD_ALLOWED_VALUES.keys()) + + +def check_is_class_valid(service_handler_class : Type['_ServiceHandler']) -> None: + from ._ServiceHandler import _ServiceHandler + if not issubclass(service_handler_class, _ServiceHandler): + raise UnsupportedServiceHandlerClassException(str(service_handler_class)) + +def sanitize_filter_fields( + filter_fields : Dict[FilterFieldEnum, Any], service_handler_name : Optional[str] = None +) -> Dict[FilterFieldEnum, Any]: + if len(filter_fields) == 0: + raise EmptyFilterFieldException( + filter_fields, service_handler_class_name=service_handler_name + ) + + unsupported_filter_fields = set(filter_fields.keys()).difference(SUPPORTED_FILTER_FIELDS) + if len(unsupported_filter_fields) > 0: + raise UnsupportedFilterFieldException( + unsupported_filter_fields, service_handler_class_name=service_handler_name + ) + + sanitized_filter_fields : Dict[FilterFieldEnum, Set[Any]] = dict() + for field_name, field_values in 
filter_fields.items(): + field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) + if not isinstance(field_values, Iterable) or isinstance(field_values, str): + field_values = [field_values] + + sanitized_field_values : Set[Any] = set() + for field_value in field_values: + if isinstance(field_value, Enum): field_value = field_value.value + if field_enum_values is not None and field_value not in field_enum_values: + raise UnsupportedFilterFieldValueException( + field_name, field_value, field_enum_values, + service_handler_class_name=service_handler_name + ) + sanitized_field_values.add(field_value) + + if len(sanitized_field_values) == 0: continue # do not add empty filters + sanitized_filter_fields[field_name] = sanitized_field_values + + return sanitized_filter_fields + + class ServiceHandlerFactory: - def __init__(self, service_handlers : List[Tuple[type, List[Dict[FilterFieldEnum, Any]]]]) -> None: - # Dict{field_name => Dict{field_value => Set{ServiceHandler}}} - self.__indices : Dict[str, Dict[str, Set['_ServiceHandler']]] = {} + def __init__( + self, service_handlers : List[Tuple[Type['_ServiceHandler'], List[Dict[FilterFieldEnum, Any]]]] + ) -> None: + self.__service_handlers : List[Tuple[Type['_ServiceHandler'], Dict[FilterFieldEnum, Any]]] = list() for service_handler_class,filter_field_sets in service_handlers: + check_is_class_valid(service_handler_class) + service_handler_name = service_handler_class.__name__ + for filter_fields in filter_field_sets: filter_fields = {k.value:v for k,v in filter_fields.items()} - self.register_service_handler_class(service_handler_class, **filter_fields) - - def register_service_handler_class(self, service_handler_class, **filter_fields): - from service.service.service_handler_api._ServiceHandler import _ServiceHandler - if not issubclass(service_handler_class, _ServiceHandler): - raise UnsupportedServiceHandlerClassException(str(service_handler_class)) - - service_handler_name = service_handler_class.__name__ - 
supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys()) - unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields) - if len(unsupported_filter_fields) > 0: - raise UnsupportedFilterFieldException( - unsupported_filter_fields, service_handler_class_name=service_handler_name) - - for field_name, field_values in filter_fields.items(): - field_indice = self.__indices.setdefault(field_name, dict()) - field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) - if not isinstance(field_values, Iterable) or isinstance(field_values, str): - field_values = [field_values] - for field_value in field_values: - if isinstance(field_value, Enum): field_value = field_value.value - if field_enum_values is not None and field_value not in field_enum_values: - raise UnsupportedFilterFieldValueException( - field_name, field_value, field_enum_values, service_handler_class_name=service_handler_name) - field_indice_service_handlers = field_indice.setdefault(field_value, set()) - field_indice_service_handlers.add(service_handler_class) - - def get_service_handler_class(self, **filter_fields) -> '_ServiceHandler': - supported_filter_fields = set(FILTER_FIELD_ALLOWED_VALUES.keys()) - unsupported_filter_fields = set(filter_fields.keys()).difference(supported_filter_fields) - if len(unsupported_filter_fields) > 0: raise UnsupportedFilterFieldException(unsupported_filter_fields) - - candidate_service_handler_classes : Dict['_ServiceHandler', int] = None # num. 
filter hits per service_handler - for field_name, field_values in filter_fields.items(): - field_indice = self.__indices.get(field_name) - if field_indice is None: continue - if not isinstance(field_values, Iterable) or isinstance(field_values, str): - field_values = [field_values] - if len(field_values) == 0: - # do not allow empty fields; might cause wrong selection - raise UnsatisfiedFilterException(filter_fields) - - field_enum_values = FILTER_FIELD_ALLOWED_VALUES.get(field_name) - - field_candidate_service_handler_classes = set() - for field_value in field_values: - if field_enum_values is not None and field_value not in field_enum_values: - raise UnsupportedFilterFieldValueException(field_name, field_value, field_enum_values) - field_indice_service_handlers = field_indice.get(field_value) - if field_indice_service_handlers is None: continue - field_candidate_service_handler_classes = field_candidate_service_handler_classes.union( - field_indice_service_handlers) - - if candidate_service_handler_classes is None: - candidate_service_handler_classes = {k:1 for k in field_candidate_service_handler_classes} - else: - for candidate_service_handler_class in candidate_service_handler_classes: - if candidate_service_handler_class not in field_candidate_service_handler_classes: continue - candidate_service_handler_classes[candidate_service_handler_class] += 1 - - if len(candidate_service_handler_classes) == 0: raise UnsatisfiedFilterException(filter_fields) - candidate_service_handler_classes = sorted( - candidate_service_handler_classes.items(), key=operator.itemgetter(1), reverse=True) - return candidate_service_handler_classes[0][0] + filter_fields = sanitize_filter_fields( + filter_fields, service_handler_name=service_handler_name + ) + self.__service_handlers.append((service_handler_class, filter_fields)) + + + def is_service_handler_compatible( + self, service_handler_filter_fields : Dict[FilterFieldEnum, Any], + selection_filter_fields : Dict[FilterFieldEnum, 
Any] + ) -> bool: + # by construction empty service_handler_filter_fields are not allowed + # by construction empty selection_filter_fields are not allowed + for filter_field in SUPPORTED_FILTER_FIELDS: + service_handler_values = set(service_handler_filter_fields.get(filter_field, set())) + if service_handler_values is None : continue # means service_handler does not restrict + if len(service_handler_values) == 0: continue # means service_handler does not restrict + + selection_values = set(selection_filter_fields.get(filter_field, set())) + is_field_compatible = selection_values.issubset(service_handler_values) + if not is_field_compatible: return False + + return True + + + def get_service_handler_class(self, **selection_filter_fields) -> '_ServiceHandler': + sanitized_filter_fields = sanitize_filter_fields(selection_filter_fields) + + compatible_service_handlers : List[Tuple[Type[_ServiceHandler], Dict[FilterFieldEnum, Any]]] = [ + service_handler_class + for service_handler_class,service_handler_filter_fields in self.__service_handlers + if self.is_service_handler_compatible(service_handler_filter_fields, sanitized_filter_fields) + ] + + MSG = '[get_service_handler_class] compatible_service_handlers={:s}' + LOGGER.debug(MSG.format(str(compatible_service_handlers))) + + num_compatible = len(compatible_service_handlers) + if num_compatible == 0: + raise UnsatisfiedFilterException(selection_filter_fields) + if num_compatible > 1: + raise AmbiguousFilterException(selection_filter_fields, compatible_service_handlers) + return compatible_service_handlers[0] def get_common_device_drivers(drivers_per_device : List[Set[int]]) -> Set[int]: common_device_drivers = None diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 1d274490fd54403587f9a83c00d0688eca1b1744..c30d5c308c6893198bc88f37198a229d6d1e192d 100644 --- a/src/service/service/service_handlers/__init__.py +++ 
b/src/service/service/service_handlers/__init__.py @@ -16,6 +16,7 @@ from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum from ..service_handler_api.FilterFields import FilterFieldEnum from .ipowdm.IpowdmServiceHandler import IpowdmServiceHandler from .l2nm_emulated.L2NMEmulatedServiceHandler import L2NMEmulatedServiceHandler +from .l2nm_gnmi_openconfig.L2NMGnmiOpenConfigServiceHandler import L2NMGnmiOpenConfigServiceHandler from .l2nm_ietfl2vpn.L2NM_IETFL2VPN_ServiceHandler import L2NM_IETFL2VPN_ServiceHandler from .l2nm_openconfig.L2NMOpenConfigServiceHandler import L2NMOpenConfigServiceHandler from .l3nm_emulated.L3NMEmulatedServiceHandler import L3NMEmulatedServiceHandler @@ -53,6 +54,12 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG, } ]), + (L2NMGnmiOpenConfigServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, + FilterFieldEnum.DEVICE_DRIVER : DeviceDriverEnum.DEVICEDRIVER_GNMI_OPENCONFIG, + } + ]), (L3NMEmulatedServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L3NM, diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py new file mode 100644 index 0000000000000000000000000000000000000000..7495917095fdb6cd18760f36863fede776d2e207 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/ConfigRuleComposer.py @@ -0,0 +1,292 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Dict, List, Optional, Set, Tuple +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import Device, EndPoint, Service +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set +from service.service.service_handler_api.AnyTreeTools import TreeNode + +LOGGER = logging.getLogger(__name__) + +#NETWORK_INSTANCE = 'teraflowsdn' # TODO: investigate; sometimes it does not create/delete static rules properly +NETWORK_INSTANCE = 'default' +DEFAULT_NETWORK_INSTANCE = 'default' + +def _safe_int(value: Optional[object]) -> Optional[int]: + try: + return int(value) if value is not None else None + except (TypeError, ValueError): + return None + +def _safe_bool(value: Optional[object]) -> Optional[bool]: + if value is None: + return None + if isinstance(value, bool): + return value + if isinstance(value, (int, float)): + return bool(value) + if isinstance(value, str): + lowered = value.strip().lower() + if lowered in {'true', '1', 'yes', 'y', 'on', 'tagged'}: + return True + if lowered in {'false', '0', 'no', 'n', 'off', 'untagged'}: + return False + return None + +def _interface_switched_vlan( + interface : str, interface_mode : str, access_vlan_id : Optional[int] = None, + trunk_vlan_id : Optional[int] = None, native_vlan : int = 1 +) -> Tuple[str, Dict]: + path = '/interface[{:s}]/ethernet/switched-vlan'.format(interface) + config : Dict[str, object] = {'interface-mode': interface_mode} + if 
interface_mode == 'ACCESS': + if access_vlan_id is not None: + config['access-vlan'] = access_vlan_id + elif interface_mode == 'TRUNK': + config['native-vlan'] = native_vlan + if trunk_vlan_id is not None: + config['trunk-vlans'] = [trunk_vlan_id] + return path, {'config': config} + +def _network_instance(ni_name : str, ni_type : str) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]'.format(ni_name) + data = {'name': ni_name, 'type': ni_type} + return path, data + +def _network_instance_vlan(ni_name : str, vlan_id : int, vlan_name : str = None) -> Tuple[str, Dict]: + path = '/network_instance[{:s}]/vlan[{:s}]'.format(ni_name, str(vlan_id)) + data = {'name': ni_name, 'vlan_id': vlan_id, 'vlan_name': vlan_name} + return path, data + + +class EndpointComposer: + def __init__(self, endpoint_uuid : str) -> None: + self.uuid = endpoint_uuid + self.objekt : Optional[EndPoint] = None + self.explicit_vlan_ids : Set[int] = set() + self.force_trunk = False + + def _add_vlan_id(self, vlan_id : Optional[int]) -> None: + if vlan_id is not None: + self.explicit_vlan_ids.add(vlan_id) + + def _configure_from_settings(self, json_settings : Dict) -> None: + if not isinstance(json_settings, dict): + return + vlan_id = _safe_int(json_settings.get('vlan_id', json_settings.get('vlan-id'))) + self._add_vlan_id(vlan_id) + + def configure(self, endpoint_obj : Optional[EndPoint], settings : Optional[TreeNode]) -> None: + if endpoint_obj is not None: + self.objekt = endpoint_obj + if settings is None: return + + json_settings : Dict = settings.value or dict() + self._configure_from_settings(json_settings) + for child in settings.children: + if isinstance(child.value, dict): + self._configure_from_settings(child.value) + + def set_force_trunk(self, enable : bool = True) -> None: + self.force_trunk = enable + + def _select_trunk_vlan_id(self, service_vlan_id : int) -> int: + if service_vlan_id in self.explicit_vlan_ids: + return service_vlan_id + if len(self.explicit_vlan_ids) > 0: + 
return sorted(self.explicit_vlan_ids)[0] + return service_vlan_id + + def get_vlan_ids(self) -> Set[int]: + return set(self.explicit_vlan_ids) + + def has_vlan(self, vlan_id : int) -> bool: + return vlan_id in self.get_vlan_ids() + + def get_config_rules( + self, service_vlan_id : int, access_vlan_tagged : bool = False, delete : bool = False + ) -> List[Dict]: + if self.objekt is None: + MSG = 'Endpoint object not defined for uuid={:s}' + LOGGER.warning(MSG.format(self.uuid)) + return [] + config_rules : List[Dict] = list() + json_config_rule = json_config_rule_delete if delete else json_config_rule_set + if self.force_trunk or access_vlan_tagged or len(self.explicit_vlan_ids) > 0: + trunk_vlan_id = self._select_trunk_vlan_id(service_vlan_id) + config_rules.append(json_config_rule(*_interface_switched_vlan( + self.objekt.name, 'TRUNK', trunk_vlan_id=trunk_vlan_id + ))) + else: + config_rules.append(json_config_rule(*_interface_switched_vlan( + self.objekt.name, 'ACCESS', access_vlan_id=service_vlan_id + ))) + return config_rules + + def dump(self) -> Dict: + return { + 'explicit_vlan_ids' : list(self.explicit_vlan_ids), + 'force_trunk' : self.force_trunk, + } + + def __str__(self): + data = {'uuid': self.uuid} + if self.objekt is not None: data['name'] = self.objekt.name + data.update(self.dump()) + return json.dumps(data) + +class DeviceComposer: + def __init__(self, device_uuid : str) -> None: + self.uuid = device_uuid + self.objekt : Optional[Device] = None + self.aliases : Dict[str, str] = dict() # endpoint_name => endpoint_uuid + self.endpoints : Dict[str, EndpointComposer] = dict() # endpoint_uuid => EndpointComposer + self.vlan_ids : Set[int] = set() + + def set_endpoint_alias(self, endpoint_name : str, endpoint_uuid : str) -> None: + self.aliases[endpoint_name] = endpoint_uuid + + def get_endpoint(self, endpoint_uuid : str) -> EndpointComposer: + endpoint_uuid = self.aliases.get(endpoint_uuid, endpoint_uuid) + if endpoint_uuid not in self.endpoints: + 
self.endpoints[endpoint_uuid] = EndpointComposer(endpoint_uuid) + return self.endpoints[endpoint_uuid] + + def _refresh_vlan_ids(self, service_vlan_id : int) -> None: + # Only keep the service VLAN; others are ignored for composition + self.vlan_ids = {service_vlan_id} + + def configure(self, device_obj : Device, settings : Optional[TreeNode]) -> None: + self.objekt = device_obj + for endpoint_obj in device_obj.device_endpoints: + endpoint_uuid = endpoint_obj.endpoint_id.endpoint_uuid.uuid + self.set_endpoint_alias(endpoint_obj.name, endpoint_uuid) + self.get_endpoint(endpoint_obj.name).configure(endpoint_obj, None) + + def get_config_rules( + self, network_instance_name : str, service_vlan_id : int, + access_vlan_tagged : bool = False, delete : bool = False + ) -> List[Dict]: + SELECTED_DEVICES = { + DeviceTypeEnum.PACKET_POP.value, + DeviceTypeEnum.PACKET_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value + } + if self.objekt.device_type not in SELECTED_DEVICES: return [] + + json_config_rule = json_config_rule_delete if delete else json_config_rule_set + config_rules : List[Dict] = list() + self._refresh_vlan_ids(service_vlan_id) + if network_instance_name != DEFAULT_NETWORK_INSTANCE: + config_rules.append(json_config_rule(*_network_instance(network_instance_name, 'L3VRF'))) + for endpoint in self.endpoints.values(): + config_rules.extend(endpoint.get_config_rules( + service_vlan_id, access_vlan_tagged=access_vlan_tagged, delete=delete + )) + for vlan_id in sorted(self.vlan_ids): + vlan_name = 'tfs-vlan-{:s}'.format(str(vlan_id)) + config_rules.append(json_config_rule(*_network_instance_vlan( + network_instance_name, vlan_id, vlan_name=vlan_name + ))) + if delete: config_rules = list(reversed(config_rules)) + return config_rules + + def dump(self) -> Dict: + return { + 'endpoints' : { + endpoint_uuid : endpoint.dump() + for endpoint_uuid, endpoint in self.endpoints.items() + }, + 'vlan_ids' : list(self.vlan_ids) + } + + def __str__(self): + data = {'uuid': self.uuid} + if 
self.objekt is not None: data['name'] = self.objekt.name + data.update(self.dump()) + return json.dumps(data) + +class ConfigRuleComposer: + def __init__(self) -> None: + self.objekt : Optional[Service] = None + self.aliases : Dict[str, str] = dict() # device_name => device_uuid + self.devices : Dict[str, DeviceComposer] = dict() # device_uuid => DeviceComposer + self.vlan_id = None + self.access_vlan_tagged = False + + def set_device_alias(self, device_name : str, device_uuid : str) -> None: + self.aliases[device_name] = device_uuid + + def get_device(self, device_uuid : str) -> DeviceComposer: + device_uuid = self.aliases.get(device_uuid, device_uuid) + if device_uuid not in self.devices: + self.devices[device_uuid] = DeviceComposer(device_uuid) + return self.devices[device_uuid] + + def configure(self, service_obj : Service, settings : Optional[TreeNode]) -> None: + self.objekt = service_obj + if settings is None: + raise Exception('Service settings are required to extract vlan_id') + json_settings : Dict = settings.value or dict() + + if 'vlan_id' in json_settings: + self.vlan_id = _safe_int(json_settings['vlan_id']) + elif 'vlan-id' in json_settings: + self.vlan_id = _safe_int(json_settings['vlan-id']) + else: + MSG = 'VLAN ID not found. Tried: vlan_id and vlan-id. 
service_obj={:s} settings={:s}' + raise Exception(MSG.format(grpc_message_to_json_string(service_obj), str(settings))) + + if self.vlan_id is None: + MSG = 'Invalid VLAN ID value in service settings: {:s}' + raise Exception(MSG.format(str(json_settings))) + + access_vlan_tagged = json_settings.get('access_vlan_tagged', json_settings.get('access-vlan-tagged')) + if access_vlan_tagged is None: + self.access_vlan_tagged = False + else: + parsed = _safe_bool(access_vlan_tagged) + if parsed is None: + MSG = 'Invalid access_vlan_tagged value in service settings: {:s}' + LOGGER.warning(MSG.format(str(access_vlan_tagged))) + self.access_vlan_tagged = False + else: + self.access_vlan_tagged = parsed + + def get_config_rules( + self, network_instance_name : str = NETWORK_INSTANCE, delete : bool = False + ) -> Dict[str, List[Dict]]: + if self.vlan_id is None: + raise Exception('VLAN ID must be configured at service level before composing rules') + + return { + device_uuid : device.get_config_rules( + network_instance_name, self.vlan_id, + access_vlan_tagged=self.access_vlan_tagged, delete=delete + ) + for device_uuid, device in self.devices.items() + } + + def dump(self) -> Dict: + return { + 'devices' : { + device_uuid : device.dump() + for device_uuid, device in self.devices.items() + }, + 'vlan_id': self.vlan_id, + 'access_vlan_tagged': self.access_vlan_tagged, + } diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py new file mode 100644 index 0000000000000000000000000000000000000000..baa164afa9baaa9ae5be3626a2c7fd6aaf9c086f --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/L2NMGnmiOpenConfigServiceHandler.py @@ -0,0 +1,189 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file 
except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import Any, Dict, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigRule, ConnectionId, DeviceId, Service +from common.tools.object_factory.Connection import json_connection_id +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching +from service.service.task_scheduler.TaskExecutor import TaskExecutor +from service.service.tools.EndpointIdFormatters import endpointids_to_raw +from .ConfigRuleComposer import ConfigRuleComposer +from .VlanIdPropagator import VlanIdPropagator + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'l2nm_gnmi_openconfig'}) + +class L2NMGnmiOpenConfigServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings + ) -> None: + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(service.service_config, **settings) + self.__config_rule_composer = ConfigRuleComposer() + self.__vlan_id_propagator = 
VlanIdPropagator(self.__config_rule_composer) + self.__endpoint_map : Dict[Tuple[str, str], Tuple[str, str]] = dict() + + def _compose_config_rules(self, endpoints : List[Tuple[str, str, Optional[str]]]) -> None: + if len(endpoints) % 2 != 0: raise Exception('Number of endpoints should be even') + + service_settings = self.__settings_handler.get_service_settings() + self.__config_rule_composer.configure(self.__service, service_settings) + + for endpoint in endpoints: + device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) + + device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_settings = self.__settings_handler.get_device_settings(device_obj) + self.__config_rule_composer.set_device_alias(device_obj.name, device_uuid) + _device = self.__config_rule_composer.get_device(device_obj.name) + _device.configure(device_obj, device_settings) + + endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) + endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) + _device.set_endpoint_alias(endpoint_obj.name, endpoint_uuid) + _endpoint = _device.get_endpoint(endpoint_obj.name) + _endpoint.configure(endpoint_obj, endpoint_settings) + + self.__endpoint_map[(device_uuid, endpoint_uuid)] = (device_obj.name, endpoint_obj.name) + + MSG = '[pre] config_rule_composer = {:s}' + LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump()))) + + self.__vlan_id_propagator.compose(endpoints) + + MSG = '[post] config_rule_composer = {:s}' + LOGGER.debug(MSG.format(json.dumps(self.__config_rule_composer.dump()))) + + def _do_configurations( + self, config_rules_per_device : Dict[str, List[Dict]], endpoints : List[Tuple[str, str, Optional[str]]], + delete : bool = False + ) -> List[Union[bool, Exception]]: + # Configuration is done atomically on each device, all OK / all KO per device + results_per_device = dict() + for device_name,json_config_rules in config_rules_per_device.items(): + 
try: + device_obj = self.__config_rule_composer.get_device(device_name).objekt + if len(json_config_rules) == 0: continue + del device_obj.device_config.config_rules[:] + for json_config_rule in json_config_rules: + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + self.__task_executor.configure_device(device_obj) + results_per_device[device_name] = True + except Exception as e: # pylint: disable=broad-exception-caught + verb = 'deconfigure' if delete else 'configure' + MSG = 'Unable to {:s} Device({:s}) : ConfigRules({:s})' + LOGGER.exception(MSG.format(verb, str(device_name), str(json_config_rules))) + results_per_device[device_name] = e + + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) + device_name, _ = self.__endpoint_map[(device_uuid, endpoint_uuid)] + if device_name not in results_per_device: continue + results.append(results_per_device[device_name]) + return results + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + #service_uuid = self.__service.service_id.service_uuid.uuid + connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid))) + connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids) + self._compose_config_rules(connection_endpoint_ids) + #network_instance_name = service_uuid.split('-')[0] + #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=False) + config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=False) + LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device))) + results = self._do_configurations(config_rules_per_device, endpoints, delete=False) + 
LOGGER.debug('results={:s}'.format(str(results))) + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + #service_uuid = self.__service.service_id.service_uuid.uuid + connection = self.__task_executor.get_connection(ConnectionId(**json_connection_id(connection_uuid))) + connection_endpoint_ids = endpointids_to_raw(connection.path_hops_endpoint_ids) + self._compose_config_rules(connection_endpoint_ids) + #network_instance_name = service_uuid.split('-')[0] + #config_rules_per_device = self.__config_rule_composer.get_config_rules(network_instance_name, delete=True) + config_rules_per_device = self.__config_rule_composer.get_config_rules(delete=True) + LOGGER.debug('config_rules_per_device={:s}'.format(json.dumps(config_rules_per_device))) + results = self._do_configurations(config_rules_per_device, endpoints, delete=True) + LOGGER.debug('results={:s}'.format(str(results))) + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + resource_value = json.loads(resource[1]) + self.__settings_handler.set(resource[0], resource_value) + results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to SetConfig({:s})'.format(str(resource))) + results.append(e) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources : List[Tuple[str, Any]]) -> List[Union[bool, Exception]]: + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + results = [] + for resource in resources: + try: + self.__settings_handler.delete(resource[0]); results.append(True) + except Exception as e: # pylint: disable=broad-except + LOGGER.exception('Unable to DeleteConfig({:s})'.format(str(resource))) + results.append(e) + + return results diff --git a/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py new file mode 100644 index 0000000000000000000000000000000000000000..69e2afb62a6f4a4543269cba59e03ebc1fdd3cb1 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/VlanIdPropagator.py @@ -0,0 +1,87 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json, logging +from typing import List, Optional, Tuple +from common.DeviceTypes import DeviceTypeEnum +from .ConfigRuleComposer import ConfigRuleComposer + +LOGGER = logging.getLogger(__name__) + +class VlanIdPropagator: + def __init__(self, config_rule_composer : ConfigRuleComposer) -> None: + self._config_rule_composer = config_rule_composer + self._router_types = { + DeviceTypeEnum.PACKET_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_ROUTER.value, + DeviceTypeEnum.PACKET_POP.value, + DeviceTypeEnum.PACKET_RADIO_ROUTER.value, + DeviceTypeEnum.EMULATED_PACKET_RADIO_ROUTER.value, + } + + def _is_router_device(self, device) -> bool: + return device.objekt is not None and device.objekt.device_type in self._router_types + + def compose(self, connection_hop_list : List[Tuple[str, str, Optional[str]]]) -> None: + link_endpoints = self._compute_link_endpoints(connection_hop_list) + LOGGER.debug('link_endpoints = {:s}'.format(str(link_endpoints))) + + self._propagate_vlan_id(link_endpoints) + LOGGER.debug('config_rule_composer = {:s}'.format(json.dumps(self._config_rule_composer.dump()))) + + def _compute_link_endpoints( + self, connection_hop_list : List[Tuple[str, str, Optional[str]]] + ) -> List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]]: + # In some cases connection_hop_list might contain repeated endpoints, remove them here. 
+ added_connection_hops = set() + filtered_connection_hop_list = list() + for connection_hop in connection_hop_list: + if connection_hop in added_connection_hops: continue + filtered_connection_hop_list.append(connection_hop) + added_connection_hops.add(connection_hop) + connection_hop_list = filtered_connection_hop_list + + # In some cases connection_hop_list first and last items might be internal endpoints of + # devices instead of link endpoints. Filter those endpoints not reaching a new device. + if len(connection_hop_list) > 2 and connection_hop_list[0][0] == connection_hop_list[1][0]: + # same device on first 2 endpoints + connection_hop_list = connection_hop_list[1:] + if len(connection_hop_list) > 2 and connection_hop_list[-1][0] == connection_hop_list[-2][0]: + # same device on last 2 endpoints + connection_hop_list = connection_hop_list[:-1] + + num_connection_hops = len(connection_hop_list) + if num_connection_hops % 2 != 0: raise Exception('Number of connection hops must be even') + if num_connection_hops < 4: raise Exception('Number of connection hops must be >= 4') + + it_connection_hops = iter(connection_hop_list) + return list(zip(it_connection_hops, it_connection_hops)) + + def _propagate_vlan_id( + self, link_endpoints_list : List[Tuple[Tuple[str, str, Optional[str]], Tuple[str, str, Optional[str]]]] + ) -> None: + for link_endpoints in link_endpoints_list: + device_endpoint_a, device_endpoint_b = link_endpoints + + device_uuid_a, endpoint_uuid_a = device_endpoint_a[0:2] + device_a = self._config_rule_composer.get_device(device_uuid_a) + endpoint_a = device_a.get_endpoint(endpoint_uuid_a) + + device_uuid_b, endpoint_uuid_b = device_endpoint_b[0:2] + device_b = self._config_rule_composer.get_device(device_uuid_b) + endpoint_b = device_b.get_endpoint(endpoint_uuid_b) + + if self._is_router_device(device_a) and self._is_router_device(device_b): + endpoint_a.set_force_trunk() + endpoint_b.set_force_trunk() diff --git 
a/src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py b/src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460 --- /dev/null +++ b/src/service/service/service_handlers/l2nm_gnmi_openconfig/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py index cf0eacab515d86e9ec9bc418adeb91f720d0b376..6857bce610ce63248e3f9c25d4bc4c1c853ca358 100644 --- a/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py +++ b/src/service/service/service_handlers/l3nm_gnmi_openconfig/ConfigRuleComposer.py @@ -16,6 +16,7 @@ import json, logging, netaddr, re from typing import Dict, List, Optional, Set, Tuple from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ConfigActionEnum, Device, EndPoint, Service +from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from service.service.service_handler_api.AnyTreeTools import TreeNode @@ -94,7 +95,7 @@ class EndpointComposer: self.ipv4_address = json_settings['ip_address'] else: MSG = 
'IP Address not found. Tried: address_ip and ip_address. endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + LOGGER.warning(MSG.format(grpc_message_to_json_string(endpoint_obj), str(settings))) if 'address_prefix' in json_settings: self.ipv4_prefix_len = json_settings['address_prefix'] @@ -102,7 +103,7 @@ class EndpointComposer: self.ipv4_prefix_len = json_settings['prefix_length'] else: MSG = 'IP Address Prefix not found. Tried: address_prefix and prefix_length. endpoint_obj={:s} settings={:s}' - LOGGER.warning(MSG.format(str(endpoint_obj), str(settings))) + LOGGER.warning(MSG.format(grpc_message_to_json_string(endpoint_obj), str(settings))) self.sub_interface_index = json_settings.get('index', 0) diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index 731db0011d3561c3d713780a88a701ea74f436b4..e2709d9bca485db317d6469f183621e74dcc8ae5 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -333,7 +333,14 @@ class TaskExecutor: #controllers.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller devices.setdefault(device_type, dict())[controller.device_id.device_uuid.uuid] = controller - LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) + plain_devices = { + device_type : { + device_uuid : grpc_message_to_json_string(device_grpc) + for device_uuid, device_grpc in device_dict.items() + } + for device_type, device_dict in devices.items() + } + LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(plain_devices))) #LOGGER.debug('[get_devices_from_connection] controllers = {:s}'.format(str(controllers))) #if len(devices) == 0 and len(controllers) > 0: # return controllers @@ -375,7 +382,17 @@ class TaskExecutor: controller_uuid = controller.device_id.device_uuid.uuid devices.setdefault(device_type, 
dict())[controller_uuid] = (controller, controller_drivers) - LOGGER.debug('[get_devices_from_connection] devices = {:s}'.format(str(devices))) + plain_devices = { + device_type : { + device_uuid : { + 'grpc_object' : grpc_message_to_json_string(device_grpc), + 'device_drivers' : list(device_drivers) + } + for device_uuid, (device_grpc, device_drivers) in device_dict.items() + } + for device_type, device_dict in devices.items() + } + LOGGER.debug('[get_device_type_drivers_for_connection] devices = {:s}'.format(str(plain_devices))) return devices diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 287d698c4630e73ee5c282e9f6ce707b60c6fe28..267d7ac23af5942bdec5065292e33b0b3b537d9d 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -27,6 +27,7 @@ include: - local: '/src/tests/ryu-openflow/.gitlab-ci.yml' - local: '/src/tests/qkd_end2end/.gitlab-ci.yml' - local: '/src/tests/acl_end2end/.gitlab-ci.yml' + - local: '/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml' - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' - local: '/src/tests/tools/mock_qkd_node/.gitlab-ci.yml' diff --git a/src/tests/eucnc24/.gitlab-ci.yml b/src/tests/eucnc24/.gitlab-ci.yml index ee99ea2715a777c976b4102c01907a34fbe97c4f..a4fbac0ab6dd368eb5dbd7ebced7605f8519a717 100644 --- a/src/tests/eucnc24/.gitlab-ci.yml +++ b/src/tests/eucnc24/.gitlab-ci.yml @@ -130,7 +130,7 @@ end2end_test eucnc24: - cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME} - tree -la /tmp/clab/${TEST_NAME} - cd /tmp/clab/${TEST_NAME} - - containerlab deploy --reconfigure --topo eucnc24.clab.yml + - containerlab deploy --reconfigure --topo ${TEST_NAME}.clab.yml - cd $RUNNER_PATH # Wait for initialization of Device NOSes @@ -138,9 +138,9 @@ end2end_test eucnc24: - docker ps -a # Dump configuration of the routers (before any configuration) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab 
exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Configure TeraFlowSDN deployment # Uncomment if DEBUG log level is needed for the components @@ -198,27 +198,27 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh # Dump configuration of the routers (after configure TFS service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: test connectivity with ping - - export TEST1_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) + - export TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) - echo $TEST1_10 - echo $TEST1_10 | 
grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST1_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) + - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) - echo $TEST1_1 - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) + - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) - echo $TEST2_1 - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) + - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) - echo $TEST2_10 - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST3_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) + - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) - echo $TEST3_1 - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' - - export TEST3_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) + - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) - echo $TEST3_10 - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' @@ -230,9 +230,9 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh # Dump configuration of the 
routers (after deconfigure TFS service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: configure service IETF - > @@ -242,27 +242,27 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh # Dump configuration of the routers (after configure IETF service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: test connectivity with ping - - export TEST1_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) + - export 
TEST1_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.10' --format json) - echo $TEST1_10 - echo $TEST1_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST1_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) + - export TEST1_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.1.1' --format json) - echo $TEST1_1 - echo $TEST1_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) + - export TEST2_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.1' --format json) - echo $TEST2_1 - echo $TEST2_1 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST2_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) + - export TEST2_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.2.10' --format json) - echo $TEST2_10 - echo $TEST2_10 | grep -E '3 packets transmitted, 3 received, 0\% packet loss' - - export TEST3_1=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) + - export TEST3_1=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.1' --format json) - echo $TEST3_1 - echo $TEST3_1 | grep -E '3 packets transmitted, 0 received, 100\% packet loss' - - export TEST3_10=$(containerlab exec --name eucnc24 --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) + - export TEST3_10=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=dc1 --cmd 'ping -n -c3 172.16.3.10' --format json) - echo $TEST3_10 - echo $TEST3_10 | grep -E '3 packets transmitted, 0 received, 100\% packet 
loss' @@ -274,9 +274,9 @@ end2end_test eucnc24: $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh # Dump configuration of the routers (after deconfigure IETF service) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Run end-to-end test: cleanup scenario - > @@ -287,9 +287,9 @@ end2end_test eucnc24: after_script: # Dump configuration of the routers (on after_script) - - containerlab exec --name eucnc24 --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" - - containerlab exec --name eucnc24 --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" # Dump TeraFlowSDN component logs - source src/tests/${TEST_NAME}/deploy_specs.sh @@ -303,8 +303,8 @@ end2end_test eucnc24: - 
RUNNER_PATH=`pwd` #- cd $PWD/src/tests/${TEST_NAME} - cd /tmp/clab/${TEST_NAME} - - containerlab destroy --topo eucnc24.clab.yml --cleanup || true - - sudo rm -rf clab-eucnc24/ .eucnc24.clab.yml.bak || true + - containerlab destroy --topo ${TEST_NAME}.clab.yml --cleanup || true + - sudo rm -rf clab-${TEST_NAME}/ .${TEST_NAME}.clab.yml.bak || true - cd $RUNNER_PATH - kubectl delete namespaces tfs || true - docker ps --all --quiet | xargs --no-run-if-empty docker stop diff --git a/src/tests/eucnc24/tests/Fixtures.py b/src/tests/eucnc24/tests/Fixtures.py index 5997e58c8100d8b89f2d9287fc57b9b3d1434ac4..aa37459a10e188723c9173b9258daf2372204e4d 100644 --- a/src/tests/eucnc24/tests/Fixtures.py +++ b/src/tests/eucnc24/tests/Fixtures.py @@ -15,29 +15,22 @@ import pytest from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient -from monitoring.client.MonitoringClient import MonitoringClient from service.client.ServiceClient import ServiceClient @pytest.fixture(scope='session') -def context_client(): +def context_client() -> ContextClient: _client = ContextClient() yield _client _client.close() @pytest.fixture(scope='session') -def device_client(): +def device_client() -> DeviceClient: _client = DeviceClient() yield _client _client.close() @pytest.fixture(scope='session') -def monitoring_client(): - _client = MonitoringClient() - yield _client - _client.close() - -@pytest.fixture(scope='session') -def service_client(): +def service_client() -> ServiceClient: _client = ServiceClient() yield _client _client.close() diff --git a/src/tests/eucnc24/tests/test_service_ietf_create.py b/src/tests/eucnc24/tests/test_service_ietf_create.py index f3a68801df4e9e0070d45ea4a398a5e766786ec4..c1d761f403b5fc0268ee819401f0f1c6a0536f77 100644 --- a/src/tests/eucnc24/tests/test_service_ietf_create.py +++ b/src/tests/eucnc24/tests/test_service_ietf_create.py @@ -13,7 +13,6 @@ # limitations under the License. 
import json, logging, os -from typing import Dict from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string diff --git a/src/tests/eucnc24/tests/test_service_ietf_remove.py b/src/tests/eucnc24/tests/test_service_ietf_remove.py index 2c39208248700c2183871c1a4954ef997253a954..d0dad7a2d7ad2192fcd1344dd4dafcaf92eaec48 100644 --- a/src/tests/eucnc24/tests/test_service_ietf_remove.py +++ b/src/tests/eucnc24/tests/test_service_ietf_remove.py @@ -13,7 +13,7 @@ # limitations under the License. import logging, os -from typing import Dict, Set, Tuple +from typing import Set from common.Constants import DEFAULT_CONTEXT_NAME from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum from common.tools.grpc.Tools import grpc_message_to_json_string @@ -41,12 +41,16 @@ def test_service_ietf_removal( # Check there are no slices response = context_client.ListSlices(ADMIN_CONTEXT_ID) - LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) assert len(response.slices) == 0 # Check there is 1 service response = context_client.ListServices(ADMIN_CONTEXT_ID) - LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) assert len(response.services) == 1 service_uuids : Set[str] = set() @@ -66,7 +70,7 @@ def test_service_ietf_removal( # Identify service to delete assert len(service_uuids) == 1 - service_uuid = set(service_uuids).pop() + service_uuid = service_uuids.pop() URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) do_rest_delete_request(URL, 
logger=LOGGER, expected_status_codes={204}) diff --git a/src/tests/l2_vpn_gnmi_oc/.gitignore b/src/tests/l2_vpn_gnmi_oc/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..a47dc9eff49108f99e8869a9ea8981c1d6b321c1 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/.gitignore @@ -0,0 +1,19 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +clab-*/ +images/ +*.clab.yml.bak +*.tar +*.tar.gz diff --git a/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..310770df5b377de818f2b5ed5cbe2ff6b76c430a --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/.gitlab-ci.yml @@ -0,0 +1,340 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Build, tag, and push the Docker image to the GitLab Docker registry +build l2_vpn_gnmi_oc: + variables: + TEST_NAME: 'l2_vpn_gnmi_oc' + stage: build + before_script: + - docker image prune --force + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile . + - docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" + - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" + after_script: + - docker image prune --force + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml} + - src/tests/${TEST_NAME}/Dockerfile + - .gitlab-ci.yml + +# Deploy TeraFlowSDN and Execute end-2-end test +end2end_test l2_vpn_gnmi_oc: + timeout: 45m + variables: + TEST_NAME: 'l2_vpn_gnmi_oc' + stage: end2end_test + # Disable to force running it after all other tasks + #needs: + # - build l2_vpn_gnmi_oc + before_script: + # Cleanup old ContainerLab scenarios + - containerlab destroy --all --cleanup || true + + # Do Docker cleanup + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker image prune --force + - docker network prune --force + - docker volume prune --all --force + - docker buildx prune --force + + # Check MicroK8s is ready + - microk8s status --wait-ready + - LOOP_MAX_ATTEMPTS=10 + - LOOP_COUNTER=0 + - > + while ! kubectl get pods --all-namespaces &> /dev/null; do + printf "%c" "." 
+ sleep 1 + LOOP_COUNTER=$((LOOP_COUNTER + 1)) + if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + echo "Max attempts reached, exiting the loop." + exit 1 + fi + done + - kubectl get pods --all-namespaces + + # Always delete Kubernetes namespaces + - export K8S_NAMESPACES=$(kubectl get namespace -o jsonpath='{.items[*].metadata.name}') + - echo "K8S_NAMESPACES=${K8S_NAMESPACES}" + + - export OLD_NATS_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^nats') + - echo "OLD_NATS_NAMESPACES=${OLD_NATS_NAMESPACES}" + - > + for ns in ${OLD_NATS_NAMESPACES}; do + if [[ "$ns" == nats* ]]; then + if helm3 status "$ns" &>/dev/null; then + helm3 uninstall "$ns" -n "$ns" + else + echo "Release '$ns' not found, skipping..." + fi + fi + done + - export OLD_NAMESPACES=$(echo "${K8S_NAMESPACES}" | tr ' ' '\n' | grep -E '^(tfs|crdb|qdb|kafka|nats)') + - echo "OLD_NAMESPACES=${OLD_NAMESPACES}" + - kubectl delete namespace ${OLD_NAMESPACES} || true + + # Clean-up Kubernetes Failed pods + - > + kubectl get pods --all-namespaces --no-headers --field-selector=status.phase=Failed + -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name | + xargs --no-run-if-empty --max-args=2 kubectl delete pod --namespace + + # Login Docker repository + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + + script: + # Download Docker image to run the test + - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest" + + # Check MicroK8s is ready + - microk8s status --wait-ready + - LOOP_MAX_ATTEMPTS=10 + - LOOP_COUNTER=0 + - > + while ! kubectl get pods --all-namespaces &> /dev/null; do + printf "%c" "." + sleep 1 + LOOP_COUNTER=$((LOOP_COUNTER + 1)) + if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + echo "Max attempts reached, exiting the loop." 
+ exit 1 + fi + done + - kubectl get pods --all-namespaces + + # Deploy ContainerLab Scenario + - RUNNER_PATH=`pwd` + #- cd $PWD/src/tests/${TEST_NAME} + - mkdir -p /tmp/clab/${TEST_NAME} + - cp -R src/tests/${TEST_NAME}/clab/* /tmp/clab/${TEST_NAME} + - tree -la /tmp/clab/${TEST_NAME} + - cd /tmp/clab/${TEST_NAME} + - containerlab deploy --reconfigure --topo ${TEST_NAME}.clab.yml + - cd $RUNNER_PATH + + # Wait for initialization of Device NOSes + - sleep 3 + - docker ps -a + + # Dump configuration of the routers (before any configuration) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Configure TeraFlowSDN deployment + # Uncomment if DEBUG log level is needed for the components + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml + + - source 
src/tests/${TEST_NAME}/deploy_specs.sh + #- export TFS_REGISTRY_IMAGES="${CI_REGISTRY_IMAGE}" + #- export TFS_SKIP_BUILD="YES" + #- export TFS_IMAGE_TAG="latest" + #- echo "TFS_REGISTRY_IMAGES=${CI_REGISTRY_IMAGE}" + + # Deploy TeraFlowSDN + - ./deploy/crdb.sh + - ./deploy/nats.sh + - ./deploy/kafka.sh + #- ./deploy/qdb.sh + - ./deploy/tfs.sh + - ./deploy/show.sh + + ## Wait for Context to be subscribed to NATS + ## WARNING: this loop is infinite if there is no subscriber (such as monitoring). + ## Investigate if we can use a counter to limit the number of iterations. + ## For now, keep it commented out. + #- LOOP_MAX_ATTEMPTS=180 + #- LOOP_COUNTER=0 + #- > + # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do + # echo "Attempt: $LOOP_COUNTER" + # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1; + # sleep 1; + # LOOP_COUNTER=$((LOOP_COUNTER + 1)) + # if [ "$LOOP_COUNTER" -ge "$LOOP_MAX_ATTEMPTS" ]; then + # echo "Max attempts reached, exiting the loop." 
+ # break + # fi + # done + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + + - | + ping_check() { + local SRC=$1 DST_IP=$2 PATTERN=$3 + local OUTPUT + OUTPUT=$(containerlab exec --name ${TEST_NAME} --label clab-node-name=${SRC} --cmd "ping -n -c3 ${DST_IP}" --format json) + echo "$OUTPUT" + if echo "$OUTPUT" | grep -E "$PATTERN" >/dev/null; then + echo "PASSED ${SRC}->${DST_IP}" + else + echo "FAILED ${SRC}->${DST_IP}" + fi + echo "$OUTPUT" | grep -E "$PATTERN" + } + + # Run end-to-end test: onboard scenario + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-onboarding.sh + + # Dump configuration of the routers (after configure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test no connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" + + # Run end-to-end test: configure service TFS + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-create.sh + + # Give time to routers for being configured and stabilized + - sleep 60 + + # Dump configuration of the routers (after configure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" + + # Run end-to-end test: deconfigure service TFS + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-tfs-remove.sh + + # Give time to routers for being configured and stabilized + - sleep 60 + + # Dump configuration of the routers (after deconfigure TFS service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test no connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" + + # Run end-to-end test: configure service IETF + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-create.sh + + # Give time to routers for being configured and stabilized + - sleep 60 + + # Dump configuration of the routers (after configure IETF service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" + + # Run end-to-end test: deconfigure service IETF + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-service-ietf-remove.sh + + # Give time to routers for being configured and stabilized + - sleep 60 + + # Dump configuration of the routers (after deconfigure IETF service) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Run end-to-end test: test no connectivity with ping + - ping_check "dc1" "172.16.1.10" "3 packets transmitted, 3 received, 0% packet loss" + - ping_check "dc1" "172.16.1.20" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 100% packet loss" + - ping_check "dc1" "172.16.1.30" "3 packets transmitted, 0 received,( [\+]{0,1}[0-9]+ error[s]{0,1},)? 
100% packet loss" + + # Run end-to-end test: cleanup scenario + - > + docker run -t --rm --name ${TEST_NAME} --network=host + --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + $CI_REGISTRY_IMAGE/${TEST_NAME}:latest /var/teraflow/run-cleanup.sh + + after_script: + # Dump configuration of the routers (on after_script) + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r1 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r2 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + - containerlab exec --name ${TEST_NAME} --label clab-node-name=r3 --cmd "Cli --command \"enable"$'\n'$"show running-config\"" + + # Dump TeraFlowSDN component logs + - source src/tests/${TEST_NAME}/deploy_specs.sh + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/deviceservice -c server + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/pathcompservice -c frontend + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/serviceservice -c server + - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/nbiservice -c server + + # Clean up + - RUNNER_PATH=`pwd` + #- cd $PWD/src/tests/${TEST_NAME} + - cd /tmp/clab/${TEST_NAME} + - containerlab destroy --topo ${TEST_NAME}.clab.yml --cleanup || true + - sudo rm -rf clab-${TEST_NAME}/ .${TEST_NAME}.clab.yml.bak || true + - cd $RUNNER_PATH + - kubectl delete namespaces tfs || true + - docker ps --all --quiet | xargs --no-run-if-empty docker stop + - docker container prune --force + - docker ps --all --quiet | xargs --no-run-if-empty docker rm --force + - docker network prune --force + - docker volume prune --all --force + - docker image prune --force + + #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && 
($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + artifacts: + when: always + reports: + junit: ./src/tests/${TEST_NAME}/report_*.xml diff --git a/src/tests/l2_vpn_gnmi_oc/Dockerfile b/src/tests/l2_vpn_gnmi_oc/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..e091adc53b9e33bc8ce3852e56b6fdc2e5cd8def --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/Dockerfile @@ -0,0 +1,86 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Get generic Python packages +RUN python3 -m pip install --upgrade 'pip==25.2' +RUN python3 -m pip install --upgrade 'setuptools==79.0.0' 'wheel==0.45.1' +RUN python3 -m pip install --upgrade 'pip-tools==7.3.0' + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/^(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/tests/l2_vpn_gnmi_oc +WORKDIR /var/teraflow/tests/l2_vpn_gnmi_oc +COPY src/tests/l2_vpn_gnmi_oc/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. 
context/client/ +COPY src/device/__init__.py device/__init__.py +COPY src/device/client/. device/client/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. monitoring/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/slice/__init__.py slice/__init__.py +COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ +COPY src/tests/*.py ./tests/ +COPY src/tests/l2_vpn_gnmi_oc/__init__.py ./tests/l2_vpn_gnmi_oc/__init__.py +COPY src/tests/l2_vpn_gnmi_oc/data/. ./tests/l2_vpn_gnmi_oc/data/ +COPY src/tests/l2_vpn_gnmi_oc/tests/. ./tests/l2_vpn_gnmi_oc/tests/ +COPY src/tests/l2_vpn_gnmi_oc/scripts/. ./ + +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install tree && \ + rm -rf /var/lib/apt/lists/* + +RUN tree -la /var/teraflow diff --git a/src/tests/l2_vpn_gnmi_oc/README.md b/src/tests/l2_vpn_gnmi_oc/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0bd02d5f9974553d77b0dc3b3496f001c73c3ebf --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/README.md @@ -0,0 +1,128 @@ +# L2 VPN test with gNMI/OpenConfig + +## Emulated DataPlane Deployment +- ContainerLab +- Scenario +- Descriptor + +## TeraFlowSDN Deployment +```bash +cd ~/tfs-ctrl +source ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh +./deploy/all.sh +``` + +# ContainerLab - Arista cEOS - Commands + +## Download and install ContainerLab +```bash +sudo bash -c "$(curl -sL https://get.containerlab.dev)" -- -v 0.59.0 +``` + +## Download Arista cEOS image and create Docker image +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +docker import arista/cEOS64-lab-4.33.5M.tar ceos:4.33.5M +``` + +## Deploy scenario +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +sudo containerlab deploy --topo l2_vpn_gnmi_oc.clab.yml +``` + +## Inspect scenario +```bash +cd 
~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +sudo containerlab inspect --topo l2_vpn_gnmi_oc.clab.yml +``` + +## Destroy scenario +```bash +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/ +sudo containerlab destroy --topo l2_vpn_gnmi_oc.clab.yml +sudo rm -rf clab-l2_vpn_gnmi_oc/ .l2_vpn_gnmi_oc.clab.yml.bak +``` + +## Access cEOS Bash/CLI +```bash +docker exec -it clab-l2_vpn_gnmi_oc-r1 bash +docker exec -it clab-l2_vpn_gnmi_oc-r2 bash +docker exec -it clab-l2_vpn_gnmi_oc-r3 bash +docker exec -it clab-l2_vpn_gnmi_oc-r1 Cli +docker exec -it clab-l2_vpn_gnmi_oc-r2 Cli +docker exec -it clab-l2_vpn_gnmi_oc-r3 Cli +``` + +## Configure ContainerLab clients +```bash +docker exec -it clab-l2_vpn_gnmi_oc-dc1 bash + ip link set address 00:c1:ab:00:01:0a dev eth1 + ip link set eth1 up + ip link add link eth1 name eth1.125 type vlan id 125 + ip address add 172.16.1.10/24 dev eth1.125 + ip link set eth1.125 up + ping 172.16.1.20 + +docker exec -it clab-l2_vpn_gnmi_oc-dc2 bash + ip link set address 00:c1:ab:00:01:14 dev eth1 + ip link set eth1 up + ip link add link eth1 name eth1.125 type vlan id 125 + ip address add 172.16.1.20/24 dev eth1.125 + ip link set eth1.125 up + ping 172.16.1.10 +``` + +## Install gNMIc +```bash +sudo bash -c "$(curl -sL https://get-gnmic.kmrd.dev)" +``` + +## gNMI Capabilities request +```bash +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure capabilities +``` + +## gNMI Get request +```bash +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path / > r1.json +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /interfaces/interface > r1-ifaces.json +``` + +## gNMI Set request +```bash +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --update-path /system/config/hostname --update-value srl11 +gnmic 
--address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path /system/config/hostname + +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set \ +--update-path '/network-instances/network-instance[name=default]/vlans/vlan[vlan-id=200]/config/vlan-id' --update-value 200 \ +--update-path '/interfaces/interface[name=Ethernet10]/config/name' --update-value '"Ethernet10"' \ +--update-path '/interfaces/interface[name=Ethernet10]/ethernet/switched-vlan/config/interface-mode' --update-value '"ACCESS"' \ +--update-path '/interfaces/interface[name=Ethernet10]/ethernet/switched-vlan/config/access-vlan' --update-value 200 \ +--update-path '/interfaces/interface[name=Ethernet2]/config/name' --update-value '"Ethernet2"' \ +--update-path '/interfaces/interface[name=Ethernet2]/ethernet/switched-vlan/config/interface-mode' --update-value '"TRUNK"' \ +--update-path '/interfaces/interface[name=Ethernet2]/ethernet/switched-vlan/config/trunk-vlans' --update-value 200 + +``` + +## Subscribe request +```bash +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf subscribe --path /interfaces/interface[name=Management0]/state/ + +# In another terminal, you can generate traffic opening SSH connection +ssh admin@clab-l2_vpn_gnmi_oc-r1 +``` + +# Check configurations done: +```bash +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/' > r1-all.json +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/network-instances' > r1-nis.json +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf get --path '/interfaces' > r1-ifs.json +``` + +# Delete elements: +```bash +gnmic --address clab-l2_vpn_gnmi_oc-r1 
--port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/network-instances/network-instance[name=b19229e8]' +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/1]/subinterfaces/subinterface[index=0]' +gnmic --address clab-l2_vpn_gnmi_oc-r1 --port 6030 --username admin --password admin --insecure --encoding json_ietf set --delete '/interfaces/interface[name=ethernet-1/2]/subinterfaces/subinterface[index=0]' +``` diff --git a/src/tests/l2_vpn_gnmi_oc/__init__.py b/src/tests/l2_vpn_gnmi_oc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg new file mode 100644 index 0000000000000000000000000000000000000000..93b91da828a464cb1aaa477b0783465422f275cb --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r1-l2-vpn.cfg @@ -0,0 +1,74 @@ +! Command: show running-config +! device: r1 (cEOSLab, EOS-4.33.5M-43712898.4335M (engineering build)) +! +no aaa root +! 
+username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r1 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet2 + no switchport + ip address 10.0.12.1/30 + mpls ldp interface +! +interface Ethernet10 + no switchport +! +interface Loopback0 + ip address 1.1.1.1/32 +! +interface Management0 + ip address 172.20.20.101/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +ip route 2.2.2.2/32 10.0.12.2 +! +mpls ip +! +mpls ldp + router-id interface Loopback0 + no shutdown + ! + pseudowires + pseudowire pw-dc1-dc2 + neighbor 2.2.2.2 + pseudowire-id 100 + mtu 1500 +! +patch panel + patch dc1-pw + connector 1 interface Ethernet10 dot1q vlan 100 + connector 2 pseudowire ldp pw-dc1-dc2 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg new file mode 100644 index 0000000000000000000000000000000000000000..f90737f82f5c148a8d8bfffef01a01ab14697f99 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/cfg-static-pseudowires/r2-l2-vpn.cfg @@ -0,0 +1,74 @@ +! Command: show running-config +! device: r2 (cEOSLab, EOS-4.33.5M-43712898.4335M (engineering build)) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! 
+management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r2 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet1 + no switchport + ip address 10.0.12.2/30 + mpls ldp interface +! +interface Ethernet10 + no switchport +! +interface Loopback0 + ip address 2.2.2.2/32 +! +interface Management0 + ip address 172.20.20.102/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +ip route 1.1.1.1/32 10.0.12.1 +! +mpls ip +! +mpls ldp + router-id interface Loopback0 + no shutdown + ! + pseudowires + pseudowire pw-dc1-dc2 + neighbor 1.1.1.1 + pseudowire-id 100 + mtu 1500 +! +patch panel + patch dc2-pw + connector 1 interface Ethernet10 dot1q vlan 100 + connector 2 pseudowire ldp pw-dc1-dc2 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml new file mode 100644 index 0000000000000000000000000000000000000000..611dd90b316a8e71be18d327f0fbce77a17ac2a4 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/l2_vpn_gnmi_oc.clab.yml @@ -0,0 +1,80 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# TFS - Arista devices + Linux clients + +name: l2_vpn_gnmi_oc + +mgmt: + network: mgmt-net + ipv4-subnet: 172.20.20.0/24 + +topology: + kinds: + arista_ceos: + kind: arista_ceos + #image: ceos:4.30.4M + #image: ceos:4.31.2F + #image: ceos:4.31.5M # tested, works + #image: ceos:4.32.0F + #image: ceos:4.33.5M + #image: ceos:4.34.4M + image: ceos:4.32.2F + #image: ceos:4.32.2.1F + #image: ceos:4.33.1F # does not work, libyang.util.LibyangError: failed to parse data tree: No module named "openconfig-platform-healthz" in the context. + linux: + kind: linux + image: ghcr.io/hellt/network-multitool:latest + + nodes: + r1: + kind: arista_ceos + mgmt-ipv4: 172.20.20.101 + startup-config: r1-startup.cfg + + r2: + kind: arista_ceos + mgmt-ipv4: 172.20.20.102 + startup-config: r2-startup.cfg + + r3: + kind: arista_ceos + mgmt-ipv4: 172.20.20.103 + startup-config: r3-startup.cfg + + dc1: + kind: linux + mgmt-ipv4: 172.20.20.201 + exec: + - ip link set address 00:c1:ab:00:01:0a dev eth1 + - ip link set eth1 up + - ip link add link eth1 name eth1.125 type vlan id 125 + - ip address add 172.16.1.10/24 dev eth1.125 + - ip link set eth1.125 up + + dc2: + kind: linux + mgmt-ipv4: 172.20.20.202 + exec: + - ip link set address 00:c1:ab:00:01:14 dev eth1 + - ip link set eth1 up + - ip link add link eth1 name eth1.125 type vlan id 125 + - ip address add 172.16.1.20/24 dev eth1.125 + - ip link set eth1.125 up + + links: + - endpoints: ["r1:eth2", "r2:eth1"] + - endpoints: ["r2:eth3", "r3:eth2"] + - endpoints: ["r1:eth10", "dc1:eth1"] + - endpoints: ["r3:eth10", "dc2:eth1"] diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..712797deb76a3345e4a756c6bee9d6f3d13400db --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/r1-startup.cfg @@ -0,0 +1,48 @@ +! 
device: r1 (cEOSLab, EOS-4.34.4M) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r1 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet2 +! +interface Ethernet10 +! +interface Management0 + ip address 172.20.20.101/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..6a1133703d0e195f58c0a8cd0b42a84068ef90b2 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/r2-startup.cfg @@ -0,0 +1,48 @@ +! device: r2 (cEOSLab, EOS-4.34.4M) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r2 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet1 +! +interface Ethernet3 +! +interface Management0 + ip address 172.20.20.102/24 +! +ip routing +! 
+ip route 0.0.0.0/0 172.20.20.1 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! +end diff --git a/src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg b/src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..946de6f77a5a2803fccebef4f3777e7102608225 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/clab/r3-startup.cfg @@ -0,0 +1,48 @@ +! device: r3 (cEOSLab, EOS-4.34.4M) +! +no aaa root +! +username admin privilege 15 role network-admin secret sha512 $6$OmfaAwJRg/r44r5U$9Fca1O1G6Bgsd4NKwSyvdRJcHHk71jHAR3apDWAgSTN/t/j1iroEhz5J36HjWjOF/jEVC/R8Wa60VmbX6.cr70 +! +management api http-commands + no shutdown +! +no service interface inactive port-id allocation disabled +! +transceiver qsfp default-mode 4x10G +! +service routing protocols model multi-agent +! +hostname r3 +! +spanning-tree mode mstp +! +system l1 + unsupported speed action error + unsupported error-correction action error +! +management api gnmi + transport grpc default +! +management api netconf + transport ssh default +! +interface Ethernet2 +! +interface Ethernet10 +! +interface Management0 + ip address 172.20.20.103/24 +! +ip routing +! +ip route 0.0.0.0/0 172.20.20.1 +! +router multicast + ipv4 + software-forwarding kernel + ! + ipv6 + software-forwarding kernel +! 
+end diff --git a/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json new file mode 100644 index 0000000000000000000000000000000000000000..95bea42eacba9440fc5e68a035d3a9ff5a3e59a5 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/ietf-l2vpn-service.json @@ -0,0 +1,81 @@ +{ + "ietf-l2vpn-svc:l2vpn-svc": { + "vpn-services": {"vpn-service": [ + { + "vpn-id": "ietf-l2vpn-svc", + "vpn-svc-type": "vpws", + "svc-topo": "any-to-any", + "customer-name": "somebody", + "ce-vlan-preservation": true, + "ce-vlan-cos-preservation": true, + "frame-delivery": { + "multicast-gp-port-mapping": "ietf-l2vpn-svc:static-mapping" + } + } + ]}, + "sites": { + "site": [ + { + "site-id": "site_DC1", + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC1"}]}, + "devices": {"device": [{"device-id": "dc1", "location": "DC1"}]}, + "default-ce-vlan-id": 1, + "site-network-accesses": { + "site-network-access": [ + { + "network-access-id": "eth1", + "type": "ietf-l2vpn-svc:multipoint", + "device-reference": "dc1", + "vpn-attachment": { + "vpn-id": "ietf-l2vpn-svc", + "site-role": "ietf-l2vpn-svc:any-to-any-role" + }, + "bearer": {"bearer-reference": "r1:Ethernet10"}, + "service": {"svc-mtu": 1400}, + "connection": { + "encapsulation-type": "vlan", + "tagged-interface": { + "type": "ietf-l2vpn-svc:dot1q", + "dot1q-vlan-tagged": {"cvlan-id": 125} + }, + "oam": {"md-name": "fake-md-name", "md-level": 0} + } + } + ] + } + }, + { + "site-id": "site_DC2", + "management": {"type": "ietf-l2vpn-svc:provider-managed"}, + "locations": {"location": [{"location-id": "DC2"}]}, + "devices": {"device": [{"device-id": "dc2", "location": "DC2"}]}, + "default-ce-vlan-id": 1, + "site-network-accesses": { + "site-network-access": [ + { + "network-access-id": "eth1", + "type": "ietf-l2vpn-svc:multipoint", + "device-reference": "dc2", + "vpn-attachment": { + "vpn-id": "ietf-l2vpn-svc", + "site-role": 
"ietf-l2vpn-svc:any-to-any-role" + }, + "bearer": {"bearer-reference": "r3:Ethernet10"}, + "service": {"svc-mtu": 1400}, + "connection": { + "encapsulation-type": "vlan", + "tagged-interface": { + "type": "ietf-l2vpn-svc:dot1q", + "dot1q-vlan-tagged": {"cvlan-id": 125} + }, + "oam": {"md-name": "fake-md-name", "md-level": 0} + } + } + ] + } + } + ] + } + } +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json new file mode 100644 index 0000000000000000000000000000000000000000..c5cc6b5e94930f947058c9b677148a3c78232186 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-tagged.json @@ -0,0 +1,21 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-125"} + }, + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} + ], + "service_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": { + "resource_key": "/settings", + "resource_value": {"vlan_id": 125, "access_vlan_tagged": true} + }} + ]} + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-untagged.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-untagged.json new file mode 100644 index 0000000000000000000000000000000000000000..f7031955531fc08dbf99836a17abbf7346b88b4b --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-service-vlan-125-untagged.json @@ -0,0 +1,21 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "tfs-l2vpn-svc-125"} + }, + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + 
{"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "int"}} + ], + "service_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": { + "resource_key": "/settings", + "resource_value": {"vlan_id": 125} + }} + ]} + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json b/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json new file mode 100644 index 0000000000000000000000000000000000000000..ac87af62d31e4728c12687c525233d8e840d7441 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/data/tfs-topology.json @@ -0,0 +1,126 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "dc1"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": "copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "dc2"}}, "device_type": "emu-datacenter", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "eth1", "type": 
"copper"}, {"uuid": "int", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "r1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.101"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "use_tls": false + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "r2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.102"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "use_tls": false + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "r3"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_GNMI_OPENCONFIG"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.20.20.103"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "6030"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "username": "admin", "password": "admin", "use_tls": false + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "r1/Ethernet2==r2/Ethernet1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}, + 
{"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "r2/Ethernet1==r1/Ethernet2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet1"}}, + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet2"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "r2/Ethernet3==r3/Ethernet2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}}, + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "r3/Ethernet2==r2/Ethernet3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet2"}}, + {"device_id": {"device_uuid": {"uuid": "r2"}}, "endpoint_uuid": {"uuid": "Ethernet3"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "r1/Ethernet10==dc1/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}, + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "dc1/eth1==r1/Ethernet10"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc1"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "r1"}}, "endpoint_uuid": {"uuid": "Ethernet10"}} + ] + }, + + { + "link_id": {"link_uuid": {"uuid": "r3/Ethernet10==dc2/eth1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}}, + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "dc2/eth1==r3/Ethernet10"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "dc2"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": 
{"device_uuid": {"uuid": "r3"}}, "endpoint_uuid": {"uuid": "Ethernet10"}} + ] + } + ] +} diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh new file mode 100755 index 0000000000000000000000000000000000000000..94c4d7d5181bc90abda5692d41742853574a0a74 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc1.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-dc1 bash diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh new file mode 100755 index 0000000000000000000000000000000000000000..9d6e84b1f376cfe02afdf99fa098fbeaa7a35b21 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-dc2.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-dc2 bash diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh new file mode 100755 index 0000000000000000000000000000000000000000..26d39cfcd3a406e46648d83b84f1c0ac5a9020d8 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r1.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-r1 Cli diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh new file mode 100755 index 0000000000000000000000000000000000000000..e6ee51ec92bcd071f279c6721050f4d81d80c7e0 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r2.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +docker exec -it clab-l2_vpn_gnmi_oc-r2 Cli diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh new file mode 100755 index 0000000000000000000000000000000000000000..d817ccffda0f9863cb6ee429e85741c9af675abb --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-cli-r3.sh @@ -0,0 +1,16 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +docker exec -it clab-l2_vpn_gnmi_oc-r3 Cli diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh new file mode 100755 index 0000000000000000000000000000000000000000..294f680b21d1b7f24d2cb820bd8e8a2b6b123065 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-deploy.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc +sudo containerlab deploy --topo clab/l2_vpn_gnmi_oc.clab.yml diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh new file mode 100755 index 0000000000000000000000000000000000000000..68f7c30b1d74e1d942eeb39422c27e6477781ef1 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-destroy.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc +sudo containerlab destroy --topo clab/l2_vpn_gnmi_oc.clab.yml +sudo rm -rf clab/clab-l2_vpn_gnmi_oc/ clab/.l2_vpn_gnmi_oc.clab.yml.bak diff --git a/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh new file mode 100755 index 0000000000000000000000000000000000000000..2a8325f59d076dd76ce6305f980680133884b384 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy-scripts/clab-inspect.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cd ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc +sudo containerlab inspect --topo l2_vpn_gnmi_oc.clab.yml diff --git a/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh b/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh new file mode 100755 index 0000000000000000000000000000000000000000..72cd25b58a02f442838bab866cc969680c073ebc --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh @@ -0,0 +1,208 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +#export TFS_COMPONENTS="context device pathcomp service slice nbi webui load_generator" +export TFS_COMPONENTS="context device pathcomp service nbi" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. 
+#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. 
+export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. 
+export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="YES" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. 
+export KFK_SERVER_PORT="9092" + +# Set the flag to YES for redeploying of Apache Kafka +export KFK_REDEPLOY="" diff --git a/src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh b/src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh new file mode 100755 index 0000000000000000000000000000000000000000..d3e80c1084ae8b52351a0a488b53bca65bba7f9b --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/redeploy-tfs.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source ~/tfs-ctrl/src/tests/l2_vpn_gnmi_oc/deploy_specs.sh +./deploy/all.sh diff --git a/src/tests/l2_vpn_gnmi_oc/requirements.in b/src/tests/l2_vpn_gnmi_oc/requirements.in new file mode 100644 index 0000000000000000000000000000000000000000..5c92783a232a5bbe18b4dd6d0e6735e3ce8414c2 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/requirements.in @@ -0,0 +1,15 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +requests==2.27.* diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh new file mode 100755 index 0000000000000000000000000000000000000000..0bdae7627017f5b9e32aa2560114e23e8aebb1cc --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-cleanup.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_cleanup.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh new file mode 100755 index 0000000000000000000000000000000000000000..36b2ab29282e0a761ad8f44a7922af78ba27608d --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-onboarding.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_onboarding.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh new file mode 100755 index 0000000000000000000000000000000000000000..4c60250bb3bccfdc3d042fdaf652fb139e97fbc9 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-create.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_ietf_create.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh new file mode 100755 index 0000000000000000000000000000000000000000..02fabe7d7e9c47ffa7561b529c145903476bf267 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-ietf-remove.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_ietf_remove.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh new file mode 100755 index 0000000000000000000000000000000000000000..91dc1f2aa67997c3c4be74a2f8b7a5fc729c0284 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-create.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_tfs_create.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py diff --git a/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh new file mode 100755 index 0000000000000000000000000000000000000000..d170a6ef430704b2e7345607095d0895897c424f --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/scripts/run-service-tfs-remove.sh @@ -0,0 +1,20 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source /var/teraflow/tfs_runtime_env_vars.sh +export PYTHONPATH=/var/teraflow +pytest --verbose --log-level=INFO \ + --junitxml=/opt/results/report_service_tfs_remove.xml \ + /var/teraflow/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..aa37459a10e188723c9173b9258daf2372204e4d --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/Fixtures.py @@ -0,0 +1,36 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient + +@pytest.fixture(scope='session') +def context_client() -> ContextClient: + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client() -> DeviceClient: + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client() -> ServiceClient: + _client = ServiceClient() + yield _client + _client.close() diff --git a/src/tests/l2_vpn_gnmi_oc/tests/Tools.py b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..bbee845cd57f8dcb57e19f1f8ecc71940e99df30 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/Tools.py @@ -0,0 +1,109 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import enum, logging, requests +from typing import Any, Dict, List, Optional, Set, Union +from common.Constants import ServiceNameEnum +from common.Settings import get_service_host, get_service_port_http + +NBI_ADDRESS = get_service_host(ServiceNameEnum.NBI) +NBI_PORT = get_service_port_http(ServiceNameEnum.NBI) +NBI_USERNAME = 'admin' +NBI_PASSWORD = 'admin' +NBI_BASE_URL = '' + +class RestRequestMethod(enum.Enum): + GET = 'get' + POST = 'post' + PUT = 'put' + PATCH = 'patch' + DELETE = 'delete' + +EXPECTED_STATUS_CODES : Set[int] = { + requests.codes['OK' ], + requests.codes['CREATED' ], + requests.codes['ACCEPTED' ], + requests.codes['NO_CONTENT'], +} + +def do_rest_request( + method : RestRequestMethod, url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + request_url = 'http://{:s}:{:s}@{:s}:{:d}{:s}{:s}'.format( + NBI_USERNAME, NBI_PASSWORD, NBI_ADDRESS, NBI_PORT, str(NBI_BASE_URL), url + ) + + if logger is not None: + msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url)) + if body is not None: msg += ' body={:s}'.format(str(body)) + logger.warning(msg) + reply = requests.request(method.value, request_url, timeout=timeout, json=body, allow_redirects=allow_redirects) + if logger is not None: + logger.warning('Reply: {:s}'.format(str(reply.text))) + assert reply.status_code in expected_status_codes, 'Reply failed with status code {:d}'.format(reply.status_code) + + if reply.content and len(reply.content) > 0: return reply.json() + return None + +def do_rest_get_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.GET, url, 
body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_post_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.POST, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_put_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PUT, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_patch_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.PATCH, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) + +def do_rest_delete_request( + url : str, body : Optional[Any] = None, timeout : int = 10, + allow_redirects : bool = True, expected_status_codes : Set[int] = EXPECTED_STATUS_CODES, + logger : Optional[logging.Logger] = None +) -> Optional[Union[Dict, List]]: + return do_rest_request( + RestRequestMethod.DELETE, url, body=body, timeout=timeout, allow_redirects=allow_redirects, + expected_status_codes=expected_status_codes, logger=logger + ) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/__init__.py 
b/src/tests/l2_vpn_gnmi_oc/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py b/src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py new file mode 100644 index 0000000000000000000000000000000000000000..20afb5fe02d63f64de45fe87830e8996302c4395 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_cleanup.py @@ -0,0 +1,44 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_cleanup( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py b/src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py new file mode 100644 index 0000000000000000000000000000000000000000..763d7da171c99b781a6d25fc01e3c10c340bfb43 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_onboarding.py @@ -0,0 +1,67 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from .Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'tfs-topology.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_onboarding( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: 
disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 + assert num_devices_enabled == num_devices diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py new file mode 100644 index 0000000000000000000000000000000000000000..480aee615487ec7aa96ae0f3321ffd1f56b8a73d --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_create.py @@ -0,0 +1,70 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_get_request, do_rest_post_request + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +# pylint: disable=redefined-outer-name, unused-argument +def test_service_ietf_creation( + context_client : ContextClient, +): + # Issue service creation request + with open(REQUEST_FILE, 'r', encoding='UTF-8') as f: + svc1_data = json.load(f) + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services' + do_rest_post_request(URL, body=svc1_data, logger=LOGGER, expected_status_codes={201}) + vpn_id = svc1_data['ietf-l2vpn-svc:l2vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] + + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(vpn_id) + service_data = do_rest_get_request(URL, logger=LOGGER, expected_status_codes={200}) + service_uuid = service_data['service-id'] + + # Verify service was created + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) + assert len(response.services) == 1 + + for service in response.services: + service_id = service.service_id + assert service_id.service_uuid.uuid == 
service_uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py new file mode 100644 index 0000000000000000000000000000000000000000..58aa2321dd7ae6e065320c6bfe57731eba870fc7 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_ietf_remove.py @@ -0,0 +1,81 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, os +from typing import Set +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from .Fixtures import context_client # pylint: disable=unused-import +from .Tools import do_rest_delete_request + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +REQUEST_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'ietf-l2vpn-service.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +# pylint: disable=redefined-outer-name, unused-argument +def test_service_ietf_removal( + context_client : ContextClient, # pylint: disable=redefined-outer-name +): + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) + assert len(response.services) == 1 + + service_uuids : Set[str] = set() + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + 
grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 + + service_uuids.add(service_id.service_uuid.uuid) + + # Identify service to delete + assert len(service_uuids) == 1 + service_uuid = service_uuids.pop() + + URL = '/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={:s}/'.format(service_uuid) + do_rest_delete_request(URL, logger=LOGGER, expected_status_codes={204}) + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py new file mode 100644 index 0000000000000000000000000000000000000000..3e7abd640972c0852d57c82cc654e45c485c3d57 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_create.py @@ -0,0 +1,79 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from .Fixtures import context_client, device_client, service_client # pylint: disable=unused-import + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '..', 'data', 'tfs-service-vlan-125-tagged.json' +) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +def test_service_tfs_creation( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +): + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, + device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format( + len(response.slices), grpc_message_to_json_string(response) + )) + assert len(response.slices) == 0 + + # Check there is 1 service + response = 
context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format( + len(response.services), grpc_message_to_json_string(response) + )) + assert len(response.services) == 1 + + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 diff --git a/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py new file mode 100644 index 0000000000000000000000000000000000000000..137848f53adcfa8c4d06b203167bb1b47b55cf88 --- /dev/null +++ b/src/tests/l2_vpn_gnmi_oc/tests/test_service_tfs_remove.py @@ -0,0 +1,83 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, os +from typing import Set, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from .Fixtures import context_client, service_client # pylint: disable=unused-import + + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '..', 'data', 'tfs-service-vlan-125-tagged.json' +) +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + + +def test_service_tfs_removal( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +): + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 + + context_service_uuids : Set[Tuple[str, str]] = set() + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM + + 
response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), + grpc_message_to_json_string(response) + )) + assert len(response.connections) == 1 + + context_uuid = service_id.context_id.context_uuid.uuid + service_uuid = service_id.service_uuid.uuid + context_service_uuids.add((context_uuid, service_uuid)) + + # Identify service to delete + assert len(context_service_uuids) == 1 + context_uuid, service_uuid = set(context_service_uuids).pop() + + # Delete Service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 diff --git a/src/tests/ofc22/tests/test_functional_create_service.py b/src/tests/ofc22/tests/test_functional_create_service.py index 1a4dcd325e875d2f75ef73b0b3569acb01955030..09a1afcde03890d28cc85ff6fb600fa065e79ac6 100644 --- a/src/tests/ofc22/tests/test_functional_create_service.py +++ b/src/tests/ofc22/tests/test_functional_create_service.py @@ -49,7 +49,7 @@ def test_service_creation(context_client : ContextClient, osm_wim : MockOSM): # # Ensure slices and services are created response = context_client.ListSlices(ADMIN_CONTEXT_ID) LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) - assert len(response.slices) == 1 # OSM slice + assert len(response.slices) == 0 # no slice should be created response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) diff --git a/src/tests/ofc22/tests/test_functional_delete_service.py b/src/tests/ofc22/tests/test_functional_delete_service.py index 
88677b0fe043f7dfa18204976ca0f1a2bca18a07..cdc3894b36d42275d6be7f973fdc9afcae84ee98 100644 --- a/src/tests/ofc22/tests/test_functional_delete_service.py +++ b/src/tests/ofc22/tests/test_functional_delete_service.py @@ -33,7 +33,7 @@ def test_service_removal(context_client : ContextClient, osm_wim : MockOSM): # p # Ensure slices and services are created response = context_client.ListSlices(ADMIN_CONTEXT_ID) LOGGER.info('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) - assert len(response.slices) == 1 # OSM slice + assert len(response.slices) == 0 # no slice should be created response = context_client.ListServices(ADMIN_CONTEXT_ID) LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) diff --git a/src/tests/tools/mock_osm/Tools.py b/src/tests/tools/mock_osm/Tools.py index 56b0c11d2ad5ae044380fca190fcf1f48d771e8a..4ddc7974da5dc618f72d74bf98995fa0ddd5d830 100644 --- a/src/tests/tools/mock_osm/Tools.py +++ b/src/tests/tools/mock_osm/Tools.py @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, Optional +from typing import Dict, Optional, Tuple def compose_service_endpoint_id(site_id : str, endpoint_id : Dict): device_uuid = endpoint_id['device_id']['device_uuid']['uuid'] endpoint_uuid = endpoint_id['endpoint_uuid']['uuid'] return ':'.join([site_id, device_uuid, endpoint_uuid]) -def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, priority=None, redundant=[]): +def wim_mapping( + site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, + bearer_prefix : Optional[str] = None, priority=None, redundant=[] +) -> Tuple[str, Dict]: ce_device_uuid = ce_endpoint_id['device_id']['device_uuid']['uuid'] ce_endpoint_uuid = ce_endpoint_id['endpoint_uuid']['uuid'] service_endpoint_id = compose_service_endpoint_id(site_id, ce_endpoint_id) @@ -28,6 +31,8 @@ def wim_mapping(site_id, ce_endpoint_id, pe_device_id : Optional[Dict] = None, p else: pe_device_uuid = pe_device_id['device_uuid']['uuid'] bearer = '{:s}:{:s}'.format(ce_device_uuid, pe_device_uuid) + if bearer_prefix is not None: + bearer = '{:s}:{:s}'.format(bearer_prefix, bearer) mapping = { 'service_endpoint_id': service_endpoint_id, 'datacenter_id': site_id, 'device_id': ce_device_uuid, 'device_interface_id': ce_endpoint_uuid, diff --git a/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py index aa4ca045f41ffdc69d2ebf2fcd9b5db99ce45dbe..de940a7d2546885fe50dedbe8aa7d402730f6aa6 100644 --- a/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py +++ b/src/tests/tools/mock_osm/WimconnectorIETFL2VPN.py @@ -193,7 +193,7 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): vpn_service_l = {"ietf-l2vpn-svc:vpn-service": vpn_service_list} response_service_creation = None conn_info = [] - self.logger.info("Sending vpn-service :{}".format(vpn_service_l)) + self.logger.info("Sending vpn-service : {:s}".format(str(vpn_service_l))) try: endpoint_service_creation = ( @@ -319,6 +319,8 @@ class 
WimconnectorIETFL2VPN(SdnConnectorBase): conn_info_d["mapping"] = None conn_info.append(conn_info_d) + self.logger.info("Sending site_network_accesses : {:s}".format(str(site_network_accesses))) + try: endpoint_site_network_access_creation = ( "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"