diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index c05a230d0a1567ef57722996086c8bbd9db25ecb..2e417abe409859f76d25dde11ccc98c1b956eb0d 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -28,6 +28,7 @@ from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler from .p4_dummy_l1.p4_dummy_l1_service_handler import P4DummyL1ServiceHandler from .p4_fabric_tna_int.p4_fabric_tna_int_service_handler import P4FabricINTServiceHandler from .p4_fabric_tna_l2_simple.p4_fabric_tna_l2_simple_service_handler import P4FabricL2SimpleServiceHandler +from .p4_fabric_tna_l3.p4_fabric_tna_l3_service_handler import P4FabricL3ServiceHandler from .tapi_tapi.TapiServiceHandler import TapiServiceHandler from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler from .e2e_orch.E2EOrchestratorServiceHandler import E2EOrchestratorServiceHandler @@ -125,6 +126,12 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, } ]), + (P4FabricL3ServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_L3NM, + FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, + } + ]), (L2NM_IETFL2VPN_ServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, diff --git a/src/service/service/service_handlers/p4_fabric_tna_l3/__init__.py b/src/service/service/service_handlers/p4_fabric_tna_l3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l3/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/service/service/service_handlers/p4_fabric_tna_l3/p4_fabric_tna_l3_service_handler.py b/src/service/service/service_handlers/p4_fabric_tna_l3/p4_fabric_tna_l3_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..849d1db920466af6052e660d32c29c702edf39e7 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l3/p4_fabric_tna_l3_service_handler.py @@ -0,0 +1,505 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Service handler for P4-based static L3 routing using the SD-Fabric P4 dataplane +for BMv2 and Intel Tofino switches. 
+""" + +import logging +from typing import Any, List, Dict, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigActionEnum, DeviceId, Service, Device +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type, chk_address_mac, chk_address_ipv4, chk_prefix_len_ipv4 +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_fabric_tna_l3'}) + +class P4FabricL3ServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings # type: ignore + ) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. + + """ + self.__service_label = "P4 static L3 connectivity service" + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(self.__service.service_config, **settings) + + self._init_settings() + self._parse_settings() + self._print_settings() + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Provision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_SET) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert L3 rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Installed {}/{} L3 rules on device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Deprovision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_DELETE) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert L3 rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Deleted {}/{} L3 rules from device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. 
Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) \
+        -> List[Union[bool, Exception]]:
+        """ Create/Update configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing the new value to be set.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
+        -> List[Union[bool, Exception]]:
+        """ Delete configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value containing
+                    possible additionally required values to locate the value
+                    to be removed.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.'
+ LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + def _init_settings(self): + self.__switch_info = {} + self.__port_map = {} + + try: + self.__settings = self.__settings_handler.get('/settings') + LOGGER.info("{} with settings: {}".format(self.__service_label, self.__settings)) + except Exception as ex: + self.__settings = {} + LOGGER.error("Failed to parse service settings: {}".format(ex)) + + def _default_settings(self): + port_list = [ + { + PORT_ID: 1, + PORT_TYPE: "host" + }, + { + PORT_ID: 2, + PORT_TYPE: "host" + }, + ] + routing_list = [ + { + PORT_ID: 1, + IPV4_DST: "10.158.72.11", + IPV4_PREFIX_LEN: 32, + MAC_SRC: "fa:16:3e:e2:af:28", + MAC_DST: "fa:16:3e:75:9c:e5" + }, + { + PORT_ID: 2, + IPV4_DST: "172.16.10.9", + IPV4_PREFIX_LEN: 32, + MAC_SRC: "fa:16:3e:75:9c:e5", + MAC_DST: "fa:16:3e:e2:af:28" + } + ] + switch_info = { + "p4-sw1": { + ARCH: TARGET_ARCH_V1MODEL, + DPID: 1, + PORT_LIST: port_list, + ROUTING_LIST: routing_list + } + } + self.__settings = { + SWITCH_INFO: switch_info + } + + port_map = { + "p4-sw1": { + "port-1": { + PORT_ID: 1, + PORT_TYPE: PORT_TYPE_HOST, + ROUTING_LIST: [ + { + IPV4_DST: "10.158.72.11", + IPV4_PREFIX_LEN: 32, + MAC_SRC: "fa:16:3e:e2:af:28", + MAC_DST: "fa:16:3e:75:9c:e5" + } + ] + }, + "port-2": { + PORT_ID: 2, + PORT_TYPE: PORT_TYPE_HOST, + ROUTING_LIST: [ + { + IPV4_DST: "172.16.10.9", + IPV4_PREFIX_LEN: 32, + MAC_SRC: "fa:16:3e:75:9c:e5", + MAC_DST: "fa:16:3e:e2:af:28" + } + ] + } + } + } + + def _parse_settings(self): + #TODO: Pass settings in a correct way + try: + self.__switch_info = self.__settings[SWITCH_INFO] + except Exception as ex: + LOGGER.error("Failed to parse settings: {}".format(ex)) + self._default_settings() #TODO: Remove when bug is fixed + self.__switch_info = self.__settings[SWITCH_INFO] + assert isinstance(self.__switch_info, dict), "Switch info object must be a map with switch names as keys" + + for switch_name, switch_info in self.__switch_info.items(): + assert switch_name, "Invalid P4 switch name" + assert isinstance(switch_info, dict), "Switch {} info must be a map with arch, dpid, and fwd_list items)" + assert switch_info[ARCH] in SUPPORTED_TARGET_ARCH_LIST, \ + "Switch {} - Supported P4 architectures are: {}".format(switch_name, ','.join(SUPPORTED_TARGET_ARCH_LIST)) + switch_dpid = switch_info[DPID] + assert switch_dpid > 0, "Switch {} - P4 switch dataplane ID must be a positive integer".format(switch_name, switch_info[DPID]) + + # Port list + port_list = switch_info[PORT_LIST] + assert isinstance(port_list, list), "Switch {} port list must be a list with port_id and port_type items)" + for port in port_list: + port_id = port[PORT_ID] + assert port_id >= 0, "Switch {} - Invalid P4 switch port ID".format(switch_name) + port_type = port[PORT_TYPE] + assert port_type in PORT_TYPES_STR_VALID, "Switch {} - Valid P4 switch port types are: {}".format( + switch_name, ','.join(PORT_TYPES_STR_VALID)) + + if switch_name not in self.__port_map: + self.__port_map[switch_name] = {} + port_key = PORT_PREFIX + str(port_id) + if port_key not in self.__port_map[switch_name]: + self.__port_map[switch_name][port_key] = {} + self.__port_map[switch_name][port_key][PORT_ID] = port_id + self.__port_map[switch_name][port_key][PORT_TYPE] = port_type + self.__port_map[switch_name][port_key][ROUTING_LIST] = [] + + # Routing list + routing_list = switch_info[ROUTING_LIST] + assert isinstance(routing_list, list), "Switch {} routing list be a list)" + for rt_entry in routing_list: + port_id = 
rt_entry[PORT_ID] + assert port_id >= 0, "Invalid port ID: {}".format(port_id) + ipv4_dst = rt_entry[IPV4_DST] + assert chk_address_ipv4(ipv4_dst), "Invalid destination IPv4 address {}".format(ipv4_dst) + ipv4_prefix_len = rt_entry[IPV4_PREFIX_LEN] + assert chk_prefix_len_ipv4(ipv4_prefix_len), "Invalid IPv4 address prefix length {}".format(ipv4_prefix_len) + mac_src = rt_entry[MAC_SRC] + assert chk_address_mac(mac_src), "Invalid source MAC address {}".format(mac_src) + mac_dst = rt_entry[MAC_DST] + assert chk_address_mac(mac_dst), "Invalid destination MAC address {}".format(mac_dst) + + # Retrieve entry from the port map + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + + # Add routing entry + switch_port_entry[ROUTING_LIST].append( + { + PORT_ID: port_id, + IPV4_DST: ipv4_dst, + IPV4_PREFIX_LEN: ipv4_prefix_len, + MAC_SRC: mac_src, + MAC_DST: mac_dst + } + ) + + def _print_settings(self): + LOGGER.info("--------------- {} settings ---------------".format(self.__service.name)) + LOGGER.info("--- Topology info") + for switch_name, switch_info in self.__switch_info.items(): + LOGGER.info("\t Device {}".format(switch_name)) + LOGGER.info("\t\t| Target P4 architecture: {}".format(switch_info[ARCH])) + LOGGER.info("\t\t| Data plane ID: {}".format(switch_info[DPID])) + LOGGER.info("\t\t| Port map: {}".format(self.__port_map[switch_name])) + LOGGER.info("-------------------------------------------------------") + + def _get_switch_port_in_port_map(self, switch_name : str, port_id : int) -> Dict: + assert switch_name, "A valid switch name must be used as a key to the port map" + assert port_id > 0, "A valid switch port ID must be used as a key to a switch's port map" + switch_entry = self.__port_map[switch_name] + assert switch_entry, "Switch {} does not exist in the port map".format(switch_name) + port_key = PORT_PREFIX + str(port_id) + assert switch_entry[port_key], "Port with ID {} does not exist in the switch map".format(port_id) + + return switch_entry[port_key] + + def _get_routing_list_of_switch_port(self, switch_name : str, port_id : int) -> List [Tuple]: + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + return switch_port_entry[ROUTING_LIST] + + def _create_rules(self, device_obj : Device, port_id : int, action : ConfigActionEnum): # type: ignore + dev_name = device_obj.name + + rules = [] + + ### Static routing rules + routing_list = self._get_routing_list_of_switch_port(switch_name=dev_name, port_id=port_id) + for rt_entry in routing_list: + try: + rules += rules_set_up_next_routing_simple( + egress_port=port_id, + eth_src=rt_entry[MAC_SRC], + eth_dst=rt_entry[MAC_DST], + action=action + ) + rules += rules_set_up_routing( + ipv4_dst=rt_entry[IPV4_DST], + ipv4_prefix_len=rt_entry[IPV4_PREFIX_LEN], + egress_port=port_id, + action=action + ) + except Exception as ex: + LOGGER.error("Error while creating static L3 routing rules") + raise Exception(ex) + + return rules diff --git a/src/tests/p4-fabric-tna/README.md b/src/tests/p4-fabric-tna/README.md index b96bb02e989a08b99423f00eb115fa68e94b199d..f6bc2dd0cfa3731da44116274c3f5dfb73d07dc8 100644 --- a/src/tests/p4-fabric-tna/README.md +++ b/src/tests/p4-fabric-tna/README.md @@ -139,6 +139,20 @@ cd ~/tfs-ctrl/ bash src/tests/p4-fabric-tna/run_test_03b_service_deprovision_l2.sh ``` +#### Provision L3 network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh +``` + +#### Deprovision L3 network service via the 
Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh +``` + #### Provision INT service via the Service API ```shell diff --git a/src/tests/p4-fabric-tna/descriptors/service-create-l3.json b/src/tests/p4-fabric-tna/descriptors/service-create-l3.json new file mode 100644 index 0000000000000000000000000000000000000000..7d8153e7ad6d2404d8bdf1b1fc31da4b7be2f801 --- /dev/null +++ b/src/tests/p4-fabric-tna/descriptors/service-create-l3.json @@ -0,0 +1,67 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "p4-service-l3"} + }, + "name": "p4-service-l3", + "service_type": "SERVICETYPE_L3NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "p4-sw1"}}, + "endpoint_uuid": {"uuid": "1"} + }, + { + "device_id": {"device_uuid": {"uuid": "p4-sw1"}}, + "endpoint_uuid": {"uuid": "2"} + } + ], + "service_config": { + "config_rules": [ + { + "action": "CONFIGACTION_SET", + "custom": { + "resource_key": "/settings", + "resource_value": { + "switch_info": { + "p4-sw1": { + "arch": "v1model", + "dpid": 1, + "port_list": [ + { + "port_id": 1, + "port_type": "host" + }, + { + "port_id": 2, + "port_type": "host" + } + ], + "routing_list": [ + { + "port_id": 1, + "ipv4_dst": "10.158.72.11", + "ipv4_prefix_len": 32, + "mac_src": "fa:16:3e:e2:af:28", + "mac_dst": "fa:16:3e:75:9c:e5" + }, + { + "port_id": 2, + "ipv4_dst": "172.16.10.9", + "ipv4_prefix_len": 32, + "mac_src": "fa:16:3e:75:9c:e5", + "mac_dst": "fa:16:3e:e2:af:28" + } + ] + } + } + } + } + } + ] + }, + "service_constraints": [] + } + ] +} diff --git a/src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh b/src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh new file mode 100755 index 0000000000000000000000000000000000000000..96c629370d4e3c126c04a4ded496f4ba4b6c16f1 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py diff --git a/src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh b/src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh new file mode 100755 index 0000000000000000000000000000000000000000..fdc1d72ac574aa21590f52b53407f9194a3e4633 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py new file mode 100644 index 0000000000000000000000000000000000000000..d349a08747802746a1f9d04d53a165e4359b70ce --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py @@ -0,0 +1,78 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_deletion_l3( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before_deletion = get_number_of_rules(response.devices) + + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before_deletion = len(response.services) + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L3NM) + + for service in response.services: + # Ignore services of other types + if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: + continue + + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + # Delete L3 service + 
service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))))
+
+    # Get an updated view of the services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_after_deletion = len(response.services)
+    assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one service must be deleted"
+
+    # Get an updated view of the devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    p4_rules_after_deletion = get_number_of_rules(response.devices)
+
+    rules_diff = p4_rules_before_deletion - p4_rules_after_deletion
+
+    assert p4_rules_after_deletion < p4_rules_before_deletion, "L3 service deletion must remove some rules"
+    assert rules_diff == P4_DEV_NB * L3_RULES, "L3 service must remove {} rules per device".format(L3_RULES)
diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c0009b14472ddf055d41a8bf4923c64a9f5c151
--- /dev/null
+++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py
@@ -0,0 +1,73 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import logging +from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_creation_l3( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before = len(response.services) + + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before = get_number_of_rules(response.devices) + + # Load service + descriptor_loader = DescriptorLoader( + descriptors_file=DESC_FILE_SERVICE_CREATE_L3, + context_client=context_client, device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after = len(response.services) + assert services_nb_after == services_nb_before + 1, "Exactly one new service must be in place" + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L3NM) + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after = get_number_of_rules(response.devices) + + rules_diff = p4_rules_after - p4_rules_before + + assert p4_rules_after > p4_rules_before, "L3 service must install some rules" + assert rules_diff == P4_DEV_NB * L3_RULES, "L3 service must install {} rules per device".format(L3_RULES)
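
For reference, a minimal sketch of how the `/settings` blob carried by `service-create-l3.json` is expected to be consumed: `_parse_settings` groups each switch's `routing_list` entries under `port-<id>` keys, and `_create_rules` later emits next-hop and IPv4 routing rules per entry (via `rules_set_up_next_routing_simple` and `rules_set_up_routing`). The snippet below is illustrative only — it uses plain string keys instead of the constants imported from `p4_fabric_tna_commons` and omits the validation performed by the handler.

```python
# Illustrative sketch, not the handler code: mirrors how the "/settings"
# resource of service-create-l3.json is grouped into a per-port routing map.
from typing import Any, Dict

def build_port_map(switch_info: Dict[str, Any]) -> Dict[str, Dict[str, Any]]:
    """Group each switch's routing entries under 'port-<id>' keys."""
    port_map: Dict[str, Dict[str, Any]] = {}
    for switch_name, info in switch_info.items():
        ports = port_map.setdefault(switch_name, {})
        for port in info["port_list"]:
            ports["port-{}".format(port["port_id"])] = {
                "port_id": port["port_id"],
                "port_type": port["port_type"],
                "routing_list": [],
            }
        for entry in info["routing_list"]:
            # Each entry later yields a next-hop rewrite rule set and an
            # IPv4 LPM routing rule set for the corresponding egress port.
            ports["port-{}".format(entry["port_id"])]["routing_list"].append(entry)
    return port_map

if __name__ == "__main__":
    settings = {
        "p4-sw1": {
            "arch": "v1model",
            "dpid": 1,
            "port_list": [
                {"port_id": 1, "port_type": "host"},
                {"port_id": 2, "port_type": "host"},
            ],
            "routing_list": [
                {"port_id": 1, "ipv4_dst": "10.158.72.11", "ipv4_prefix_len": 32,
                 "mac_src": "fa:16:3e:e2:af:28", "mac_dst": "fa:16:3e:75:9c:e5"},
                {"port_id": 2, "ipv4_dst": "172.16.10.9", "ipv4_prefix_len": 32,
                 "mac_src": "fa:16:3e:75:9c:e5", "mac_dst": "fa:16:3e:e2:af:28"},
            ],
        }
    }
    print(build_port_map(settings))
```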