diff --git a/proto/context.proto b/proto/context.proto index fb0111e14212dd1f5c467ff10a20c5d02045e4eb..52599784736550dab339db88e68bd43c5fdcc8da 100644 --- a/proto/context.proto +++ b/proto/context.proto @@ -199,13 +199,13 @@ message Device { DeviceId controller_id = 9; // Identifier of node controlling the actual device } -message Component { //Defined previously to this section - Tested OK - Uuid component_uuid = 1; - string name = 2; - string type = 3; +message Component { // Defined previously in this section + Uuid component_uuid = 1; + string name = 2; + string type = 3; map<string, string> attributes = 4; // dict[attr.name => json.dumps(attr.value)] - string parent = 5; + string parent = 5; } message DeviceConfig { @@ -271,6 +271,7 @@ enum LinkTypeEnum { LINKTYPE_FIBER = 2; LINKTYPE_RADIO = 3; LINKTYPE_VIRTUAL = 4; + LINKTYPE_MANAGEMENT = 5; } message LinkAttributes { @@ -325,6 +326,9 @@ enum ServiceTypeEnum { SERVICETYPE_E2E = 5; SERVICETYPE_OPTICAL_CONNECTIVITY = 6; SERVICETYPE_QKD = 7; + SERVICETYPE_L1NM = 8; + SERVICETYPE_INT = 9; + SERVICETYPE_ACL = 10; } enum ServiceStatusEnum { diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py index 74c18323015f1035be1effbe27f8d48a2a5d6d1f..0d039ac35ba9a3e7630138315da6582729d9d789 100644 --- a/src/common/tools/object_factory/Service.py +++ b/src/common/tools/object_factory/Service.py @@ -90,6 +90,6 @@ def json_service_p4_planned( ): return json_service( - service_uuid, ServiceTypeEnum.SERVICETYPE_L2NM, context_id=json_context_id(context_uuid), + service_uuid, ServiceTypeEnum.SERVICETYPE_L1NM, context_id=json_context_id(context_uuid), status=ServiceStatusEnum.SERVICESTATUS_PLANNED, endpoint_ids=endpoint_ids, constraints=constraints, config_rules=config_rules) \ No newline at end of file diff --git a/src/common/type_checkers/Checkers.py b/src/common/type_checkers/Checkers.py index e1bbe3f06b75d0a1bfc5873019776b1517f50192..e797d64414847aabb402f0bbec17dd65f9c7ec6b 100644 --- a/src/common/type_checkers/Checkers.py +++ b/src/common/type_checkers/Checkers.py @@ -13,6 +13,8 @@ # limitations under the License. import re +import ipaddress +from ctypes import c_uint16, sizeof from typing import Any, Container, Dict, List, Optional, Pattern, Set, Sized, Tuple, Union def chk_none(name : str, value : Any, reason=None) -> Any: @@ -107,3 +109,89 @@ def chk_options(name : str, value : Any, options : Container) -> Any: msg = '{}({}) is not one of options({}).' raise ValueError(msg.format(str(name), str(value), str(options))) return value + +# MAC address checker +mac_pattern = re.compile(r"^([\da-fA-F]{2}:){5}([\da-fA-F]{2})$") + +def chk_address_mac(mac_addr : str): + """ + Check whether input string is a valid MAC address or not. + + :param mac_addr: string-based MAC address + :return: boolean status + """ + return mac_pattern.match(mac_addr) is not None + +# IPv4/IPv6 address checkers +IPV4_LOCALHOST = "localhost" + +def chk_address_ipv4(ip_addr : str): + """ + Check whether input string is a valid IPv4 address or not. + + :param ip_addr: string-based IPv4 address + :return: boolean status + """ + if ip_addr == IPV4_LOCALHOST: + return True + try: + addr = ipaddress.ip_address(ip_addr) + return isinstance(addr, ipaddress.IPv4Address) + except ValueError: + return False + +def chk_prefix_len_ipv4(ip_prefix_len : int): + """ + Check whether input integer is a valid IPv4 address prefix length. 
+ + :param ip_prefix_len: IPv4 address prefix length + :return: boolean status + """ + return 0 <= ip_prefix_len <= 32 + +def chk_address_ipv6(ip_addr : str): + """ + Check whether input string is a valid IPv6 address or not. + + :param ip_addr: string-based IPv6 address + :return: boolean status + """ + try: + addr = ipaddress.ip_address(ip_addr) + return isinstance(addr, ipaddress.IPv6Address) + except ValueError: + return False + + +# VLAN ID checker +VLAN_ID_MIN = 1 +VLAN_ID_MAX = 4094 + +def chk_vlan_id(vlan_id : int): + return VLAN_ID_MIN <= vlan_id <= VLAN_ID_MAX + + +# Transport port checker + +def limits(c_int_type): + """ + Discover limits of numerical type. + + :param c_int_type: numerical type + :return: tuple of numerical type's limits + """ + signed = c_int_type(-1).value < c_int_type(0).value + bit_size = sizeof(c_int_type) * 8 + signed_limit = 2 ** (bit_size - 1) + return (-signed_limit, signed_limit - 1) \ + if signed else (0, 2 * signed_limit - 1) + +def chk_transport_port(trans_port : int): + """ + Check whether input is a valid transport port number or not. + + :param trans_port: transport port number + :return: boolean status + """ + lim = limits(c_uint16) + return lim[0] <= trans_port <= lim[1] diff --git a/src/context/service/database/models/enums/LinkType.py b/src/context/service/database/models/enums/LinkType.py index 68624af845ea813aa5ca886de97861852a294516..97eacdd8b647869e9c1393770ad78aafea1152cc 100644 --- a/src/context/service/database/models/enums/LinkType.py +++ b/src/context/service/database/models/enums/LinkType.py @@ -18,15 +18,16 @@ from ._GrpcToEnum import grpc_to_enum # IMPORTANT: Entries of enum class ORM_LinkTypeEnum should be named as in # the proto files removing the prefixes. For example, proto item -# LinkTypeEnum.DEVICEDRIVER_COPPER should be included as COPPER. +# LinkTypeEnum.COPPER should be included as COPPER. # If item name does not match, automatic mapping of proto enums # to database enums will fail. 
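# ----------------------------------------------------------------------------------------------
# Illustrative sketch (reviewer note, not part of the patch): how the boolean helpers added to
# src/common/type_checkers/Checkers.py earlier in this patch are meant to be combined by callers
# such as the P4 driver and the fabric-tna service handlers. The function name
# `validate_acl_endpoint` and the sample values are hypothetical.
# ----------------------------------------------------------------------------------------------
from common.type_checkers.Checkers import (
    chk_address_ipv4, chk_prefix_len_ipv4, chk_transport_port, chk_vlan_id)

def validate_acl_endpoint(ip_addr: str, prefix_len: int, trn_port: int, vlan_id: int) -> bool:
    # Each checker returns True/False instead of raising, so the caller decides whether to
    # assert, log a warning, or skip the offending entry.
    return (chk_address_ipv4(ip_addr) and chk_prefix_len_ipv4(prefix_len)
            and chk_transport_port(trn_port) and chk_vlan_id(vlan_id))

assert validate_acl_endpoint("10.0.0.1", 24, 50001, 100)       # all values valid
assert not validate_acl_endpoint("256.0.0.1", 24, 50001, 100)  # 256.0.0.1 is not a valid IPv4 address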
class ORM_LinkTypeEnum(enum.Enum): - UNKNOWN = LinkTypeEnum.LINKTYPE_UNKNOWN - COPPER = LinkTypeEnum.LINKTYPE_COPPER - FIBER = LinkTypeEnum.LINKTYPE_FIBER - RADIO = LinkTypeEnum.LINKTYPE_RADIO - VIRTUAL = LinkTypeEnum.LINKTYPE_VIRTUAL + UNKNOWN = LinkTypeEnum.LINKTYPE_UNKNOWN + COPPER = LinkTypeEnum.LINKTYPE_COPPER + FIBER = LinkTypeEnum.LINKTYPE_FIBER + RADIO = LinkTypeEnum.LINKTYPE_RADIO + VIRTUAL = LinkTypeEnum.LINKTYPE_VIRTUAL + MANAGEMENT = LinkTypeEnum.LINKTYPE_MANAGEMENT grpc_to_enum__link_type_enum = functools.partial( grpc_to_enum, LinkTypeEnum, ORM_LinkTypeEnum diff --git a/src/context/service/database/models/enums/ServiceType.py b/src/context/service/database/models/enums/ServiceType.py index 45f849a2643a328284e200f1718b02191fab9563..899c71acf7bbf72379bf22de9d7badb04994155b 100644 --- a/src/context/service/database/models/enums/ServiceType.py +++ b/src/context/service/database/models/enums/ServiceType.py @@ -25,11 +25,14 @@ class ORM_ServiceTypeEnum(enum.Enum): UNKNOWN = ServiceTypeEnum.SERVICETYPE_UNKNOWN L3NM = ServiceTypeEnum.SERVICETYPE_L3NM L2NM = ServiceTypeEnum.SERVICETYPE_L2NM + L1NM = ServiceTypeEnum.SERVICETYPE_L1NM TAPI_CONNECTIVITY_SERVICE = ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE TE = ServiceTypeEnum.SERVICETYPE_TE E2E = ServiceTypeEnum.SERVICETYPE_E2E OPTICAL_CONNECTIVITY = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY QKD = ServiceTypeEnum.SERVICETYPE_QKD + INT = ServiceTypeEnum.SERVICETYPE_INT + ACL = ServiceTypeEnum.SERVICETYPE_ACL grpc_to_enum__service_type = functools.partial( grpc_to_enum, ServiceTypeEnum, ORM_ServiceTypeEnum) diff --git a/src/device/service/drivers/p4/p4_common.py b/src/device/service/drivers/p4/p4_common.py index b55296a65922de93370a66301254cacf9ca7220a..b4c0d88323e7a2066c1cb73525edb6cca5b6af1a 100644 --- a/src/device/service/drivers/p4/p4_common.py +++ b/src/device/service/drivers/p4/p4_common.py @@ -23,16 +23,15 @@ as well as static variables used by various P4 driver components. """ import logging +import ipaddress +import macaddress import math -import re import socket -import ipaddress from typing import Any, Dict, List, Optional, Tuple -from ctypes import c_uint16, sizeof -import macaddress from common.type_checkers.Checkers import \ - chk_attribute, chk_string, chk_type, chk_issubclass + chk_attribute, chk_string, chk_type, chk_issubclass,\ + chk_address_mac, chk_address_ipv4, chk_address_ipv6 try: from .p4_exception import UserBadValueError except ImportError: @@ -60,17 +59,6 @@ LOGGER = logging.getLogger(__name__) # MAC address encoding/decoding -mac_pattern = re.compile(r"^([\da-fA-F]{2}:){5}([\da-fA-F]{2})$") - - -def matches_mac(mac_addr_string): - """ - Check whether input string is a valid MAC address or not. - - :param mac_addr_string: string-based MAC address - :return: boolean status - """ - return mac_pattern.match(mac_addr_string) is not None def encode_mac(mac_addr_string): @@ -94,23 +82,6 @@ def decode_mac(encoded_mac_addr): # IP address encoding/decoding -IPV4_LOCALHOST = "localhost" - - -def matches_ipv4(ip_addr_string): - """ - Check whether input string is a valid IPv4 address or not. 
- - :param ip_addr_string: string-based IPv4 address - :return: boolean status - """ - if ip_addr_string == IPV4_LOCALHOST: - return True - try: - addr = ipaddress.ip_address(ip_addr_string) - return isinstance(addr, ipaddress.IPv4Address) - except ValueError: - return False def encode_ipv4(ip_addr_string): @@ -133,20 +104,6 @@ def decode_ipv4(encoded_ip_addr): return socket.inet_ntoa(encoded_ip_addr) -def matches_ipv6(ip_addr_string): - """ - Check whether input string is a valid IPv6 address or not. - - :param ip_addr_string: string-based IPv6 address - :return: boolean status - """ - try: - addr = ipaddress.ip_address(ip_addr_string) - return isinstance(addr, ipaddress.IPv6Address) - except ValueError: - return False - - def encode_ipv6(ip_addr_string): """ Convert string-based IPv6 address into bytes. @@ -170,31 +127,6 @@ def decode_ipv6(encoded_ip_addr): # Numerical encoding/decoding -def limits(c_int_type): - """ - Discover limits of numerical type. - - :param c_int_type: numerical type - :return: tuple of numerical type's limits - """ - signed = c_int_type(-1).value < c_int_type(0).value - bit_size = sizeof(c_int_type) * 8 - signed_limit = 2 ** (bit_size - 1) - return (-signed_limit, signed_limit - 1) \ - if signed else (0, 2 * signed_limit - 1) - - -def valid_port(port): - """ - Check whether input is a valid port number or not. - - :param port: port number - :return: boolean status - """ - lim = limits(c_uint16) - return lim[0] <= port <= lim[1] - - def bitwidth_to_bytes(bitwidth): """ Convert number of bits to number of bytes. @@ -245,11 +177,11 @@ def encode(variable, bitwidth): if isinstance(variable, int): encoded_bytes = encode_num(variable, bitwidth) elif isinstance(variable, str): - if matches_mac(variable): + if chk_address_mac(variable): encoded_bytes = encode_mac(variable) - elif matches_ipv4(variable): + elif chk_address_ipv4(variable): encoded_bytes = encode_ipv4(variable) - elif matches_ipv6(variable): + elif chk_address_ipv6(variable): encoded_bytes = encode_ipv6(variable) else: try: @@ -471,7 +403,7 @@ def parse_integer_list_from_json(resource, resource_list, resource_item): return integers_list def process_optional_string_field( - #TODO: Consider adding this in common methdos as it is taken by the Emulated driver + #TODO: Consider adding this in common methods as it is taken by the Emulated driver endpoint_data : Dict[str, Any], field_name : str, endpoint_resource_value : Dict[str, Any] ) -> None: field_value = chk_attribute(field_name, endpoint_data, 'endpoint_data', default=None) diff --git a/src/device/service/drivers/p4/p4_driver.py b/src/device/service/drivers/p4/p4_driver.py index c89a42baddaf45737ebfcf26a665f0a6beb8544f..0b109529d49e51b61430da510b9b53942623df77 100644 --- a/src/device/service/drivers/p4/p4_driver.py +++ b/src/device/service/drivers/p4/p4_driver.py @@ -22,10 +22,10 @@ import logging import threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method -from common.type_checkers.Checkers import chk_type, chk_length, chk_string +from common.type_checkers.Checkers import chk_type, chk_length, chk_string, \ + chk_address_ipv4, chk_address_ipv6, chk_transport_port from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_RULES -from .p4_common import matches_ipv4, matches_ipv6, valid_port,\ - compose_resource_endpoints, parse_resource_string_from_json,\ +from .p4_common import compose_resource_endpoints,\ P4_ATTR_DEV_ID, P4_ATTR_DEV_NAME, 
P4_ATTR_DEV_ENDPOINTS,\ P4_ATTR_DEV_VENDOR, P4_ATTR_DEV_HW_VER, P4_ATTR_DEV_SW_VER,\ P4_ATTR_DEV_P4BIN, P4_ATTR_DEV_P4INFO, P4_ATTR_DEV_TIMEOUT,\ @@ -336,9 +336,9 @@ class P4Driver(_Driver): :return: void or exception in case of validation error """ # Device endpoint information - assert matches_ipv4(self.__address) or (matches_ipv6(self.__address)),\ + assert chk_address_ipv4(self.__address) or (chk_address_ipv6(self.__address)),\ f"{self.__address} not a valid IPv4 or IPv6 address" - assert valid_port(self.__port), \ + assert chk_transport_port(self.__port), \ f"{self.__port} not a valid transport port" self.__grpc_endpoint = f"{self.__address}:{self.__port}" @@ -395,18 +395,24 @@ class P4Driver(_Driver): # Path to P4 binary file if P4_ATTR_DEV_P4BIN in self.__settings: self.__p4bin_path = self.__settings.get(P4_ATTR_DEV_P4BIN) - assert os.path.exists(self.__p4bin_path),\ - "Invalid path to p4bin file: {}".format(self.__p4bin_path) - assert P4_ATTR_DEV_P4INFO in self.__settings,\ - "p4info and p4bin settings must be provided together" + if not os.path.exists(self.__p4bin_path): + LOGGER.warning( + "Invalid path to p4bin file: {}".format(self.__p4bin_path)) + self.__p4bin_path = "" + else: + assert P4_ATTR_DEV_P4INFO in self.__settings,\ + "p4info and p4bin settings must be provided together" # Path to P4 info file if P4_ATTR_DEV_P4INFO in self.__settings: self.__p4info_path = self.__settings.get(P4_ATTR_DEV_P4INFO) - assert os.path.exists(self.__p4info_path),\ - "Invalid path to p4info file: {}".format(self.__p4info_path) - assert P4_ATTR_DEV_P4BIN in self.__settings,\ - "p4info and p4bin settings must be provided together" + if not os.path.exists(self.__p4info_path): + LOGGER.warning( + "Invalid path to p4info file: {}".format(self.__p4info_path)) + self.__p4info_path = "" + else: + assert P4_ATTR_DEV_P4BIN in self.__settings,\ + "p4info and p4bin settings must be provided together" if (not self.__p4bin_path) or (not self.__p4info_path): LOGGER.warning( diff --git a/src/device/tests/test_internal_p4.py b/src/device/tests/test_internal_p4.py index af99bb86be2dba91cf71cf9a6822fdf0b6acb613..48501bfd81eeb2f47a8593e1d786fdc341e4f4c0 100644 --- a/src/device/tests/test_internal_p4.py +++ b/src/device/tests/test_internal_p4.py @@ -17,11 +17,13 @@ Internal P4 driver tests. 
""" import pytest +from common.type_checkers.Checkers import chk_address_mac, \ + chk_address_ipv4, chk_address_ipv6 from device.service.drivers.p4.p4_driver import P4Driver from device.service.drivers.p4.p4_common import ( - matches_mac, encode_mac, decode_mac, encode, - matches_ipv4, encode_ipv4, decode_ipv4, - matches_ipv6, encode_ipv6, decode_ipv6, + encode_mac, decode_mac, encode, + encode_ipv4, decode_ipv4, + encode_ipv6, decode_ipv6, encode_num, decode_num ) from .device_p4 import( @@ -172,10 +174,10 @@ def test_p4_common_mac(): :return: void """ wrong_mac = "aa:bb:cc:dd:ee" - assert not matches_mac(wrong_mac) + assert not chk_address_mac(wrong_mac) mac = "aa:bb:cc:dd:ee:fe" - assert matches_mac(mac) + assert chk_address_mac(mac) enc_mac = encode_mac(mac) assert enc_mac == b'\xaa\xbb\xcc\xdd\xee\xfe',\ "String-based MAC address to bytes failed" @@ -193,13 +195,13 @@ def test_p4_common_ipv4(): :return: void """ - assert not matches_ipv4("10.0.0.1.5") - assert not matches_ipv4("256.0.0.1") - assert not matches_ipv4("256.0.1") - assert not matches_ipv4("10001") + assert not chk_address_ipv4("10.0.0.1.5") + assert not chk_address_ipv4("256.0.0.1") + assert not chk_address_ipv4("256.0.1") + assert not chk_address_ipv4("10001") ipv4 = "10.0.0.1" - assert matches_ipv4(ipv4) + assert chk_address_ipv4(ipv4) enc_ipv4 = encode_ipv4(ipv4) assert enc_ipv4 == b'\x0a\x00\x00\x01',\ "String-based IPv4 address to bytes failed" @@ -214,11 +216,11 @@ def test_p4_common_ipv6(): :return: void """ - assert not matches_ipv6('10.0.0.1') - assert matches_ipv6('2001:0000:85a3::8a2e:370:1111') + assert not chk_address_ipv6('10.0.0.1') + assert chk_address_ipv6('2001:0000:85a3::8a2e:370:1111') ipv6 = "1:2:3:4:5:6:7:8" - assert matches_ipv6(ipv6) + assert chk_address_ipv6(ipv6) enc_ipv6 = encode_ipv6(ipv6) assert enc_ipv6 == \ b'\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\x07\x00\x08',\ diff --git a/src/service/service/service_handler_api/FilterFields.py b/src/service/service/service_handler_api/FilterFields.py index 34f5ce59a2c962a7ceb26bfd256eb772e26daa2e..170f34a61742a03db7e74c7b9ab0e8614c738058 100644 --- a/src/service/service/service_handler_api/FilterFields.py +++ b/src/service/service/service_handler_api/FilterFields.py @@ -23,11 +23,14 @@ SERVICE_TYPE_VALUES = { ServiceTypeEnum.SERVICETYPE_UNKNOWN, ServiceTypeEnum.SERVICETYPE_L3NM, ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_L1NM, ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE, ServiceTypeEnum.SERVICETYPE_TE, ServiceTypeEnum.SERVICETYPE_E2E, ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, ServiceTypeEnum.SERVICETYPE_QKD, + ServiceTypeEnum.SERVICETYPE_INT, + ServiceTypeEnum.SERVICETYPE_ACL, } DEVICE_DRIVER_VALUES = { diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index f63866d9d1dec875fe61cc55121050e5bb43e01f..7c00d5a850c8484db5cc83e17f2272dc678a419b 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -25,7 +25,11 @@ from .l3nm_ietf_actn.L3NMIetfActnServiceHandler import L3NMIetfActnServiceHandle from .l3nm_nce.L3NMNCEServiceHandler import L3NMNCEServiceHandler from .l3slice_ietfslice.L3SliceIETFSliceServiceHandler import L3NMSliceIETFSliceServiceHandler from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler -from .p4.p4_service_handler import P4ServiceHandler +from .p4_dummy_l1.p4_dummy_l1_service_handler import P4DummyL1ServiceHandler +from 
.p4_fabric_tna_int.p4_fabric_tna_int_service_handler import P4FabricINTServiceHandler +from .p4_fabric_tna_l2_simple.p4_fabric_tna_l2_simple_service_handler import P4FabricL2SimpleServiceHandler +from .p4_fabric_tna_l3.p4_fabric_tna_l3_service_handler import P4FabricL3ServiceHandler +from .p4_fabric_tna_acl.p4_fabric_tna_acl_service_handler import P4FabricACLServiceHandler from .tapi_tapi.TapiServiceHandler import TapiServiceHandler from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler from .optical_tfs.OpticalTfsServiceHandler import OpticalTfsServiceHandler @@ -105,12 +109,36 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_NETWORK_TOPOLOGY, DeviceDriverEnum.DEVICEDRIVER_ONF_TR_532], } ]), - (P4ServiceHandler, [ + (P4DummyL1ServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_L1NM, + FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, + } + ]), + (P4FabricINTServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_INT, + FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, + } + ]), + (P4FabricL2SimpleServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_L2NM, FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, } ]), + (P4FabricL3ServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_L3NM, + FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, + } + ]), + (P4FabricACLServiceHandler, [ + { + FilterFieldEnum.SERVICE_TYPE: ServiceTypeEnum.SERVICETYPE_ACL, + FilterFieldEnum.DEVICE_DRIVER: DeviceDriverEnum.DEVICEDRIVER_P4, + } + ]), (L2NM_IETFL2VPN_ServiceHandler, [ { FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, diff --git a/src/service/service/service_handlers/p4/__init__.py b/src/service/service/service_handlers/p4_dummy_l1/__init__.py similarity index 100% rename from src/service/service/service_handlers/p4/__init__.py rename to src/service/service/service_handlers/p4_dummy_l1/__init__.py diff --git a/src/service/service/service_handlers/p4/p4_service_handler.py b/src/service/service/service_handlers/p4_dummy_l1/p4_dummy_l1_service_handler.py similarity index 94% rename from src/service/service/service_handlers/p4/p4_service_handler.py rename to src/service/service/service_handlers/p4_dummy_l1/p4_dummy_l1_service_handler.py index 49bedbb22f0c9ea18cf3e3b2c8444bd1c956f4ff..6e9141cafc4146e9a942f4b22d88700886c3e584 100644 --- a/src/service/service/service_handlers/p4/p4_service_handler.py +++ b/src/service/service/service_handlers/p4_dummy_l1/p4_dummy_l1_service_handler.py @@ -13,7 +13,8 @@ # limitations under the License. """ -P4 service handler for the TeraFlowSDN controller. +P4 service handler for L1 connectivity services +(in-port to out-port or input endpoint to output endpoint). 
""" import logging @@ -28,7 +29,7 @@ from service.service.task_scheduler.TaskExecutor import TaskExecutor LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4'}) +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_dummy_l1'}) def create_rule_set(endpoint_a, endpoint_b): return json_config_rule_set( @@ -79,14 +80,13 @@ def find_names(uuid_a, uuid_b, device_endpoints): endpoint_a = endpoint.name elif endpoint.endpoint_id.endpoint_uuid.uuid == uuid_b: endpoint_b = endpoint.name - + return (endpoint_a, endpoint_b) -class P4ServiceHandler(_ServiceHandler): - def __init__(self, - service: Service, - task_executor : TaskExecutor, - **settings) -> None: +class P4DummyL1ServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings + ) -> None: """ Initialize Driver. Parameters: service @@ -106,7 +106,7 @@ class P4ServiceHandler(_ServiceHandler): self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: - """ Create/Update service endpoints form a list. + """ Create/Update service endpoints from a list. Parameters: endpoints: List[Tuple[str, str, Optional[str]]] List of tuples, each containing a device_uuid, @@ -126,21 +126,21 @@ class P4ServiceHandler(_ServiceHandler): if len(endpoints) == 0: return [] service_uuid = self.__service.service_id.service_uuid.uuid + LOGGER.info("SetEndpoint - Service {}".format(service_uuid)) history = {} - results = [] index = {} i = 0 - for endpoint in endpoints: + for endpoint in endpoints: device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now - if device_uuid in history: + if device_uuid in history: try: matched_endpoint_uuid = history.pop(device_uuid) device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) del device.device_config.config_rules[:] - + # Find names from uuids (endpoint_a, endpoint_b) = find_names(matched_endpoint_uuid, endpoint_uuid, device.device_endpoints) if endpoint_a is None: @@ -151,14 +151,14 @@ class P4ServiceHandler(_ServiceHandler): raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid))) # One way - rule = create_rule_set(endpoint_a, endpoint_b) + rule = create_rule_set(endpoint_a, endpoint_b) device.device_config.config_rules.append(ConfigRule(**rule)) # The other way - rule = create_rule_set(endpoint_b, endpoint_a) + rule = create_rule_set(endpoint_b, endpoint_a) device.device_config.config_rules.append(ConfigRule(**rule)) self.__task_executor.configure_device(device) - + results.append(True) results[index[device_uuid]] = True except Exception as e: @@ -177,7 +177,7 @@ class P4ServiceHandler(_ServiceHandler): self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None ) -> List[Union[bool, Exception]]: - """ Delete service endpoints form a list. + """ Delete service endpoints from a list. 
Parameters: endpoints: List[Tuple[str, str, Optional[str]]] List of tuples, each containing a device_uuid, @@ -197,15 +197,15 @@ class P4ServiceHandler(_ServiceHandler): if len(endpoints) == 0: return [] service_uuid = self.__service.service_id.service_uuid.uuid + LOGGER.info("DeleteEndpoint - Service {}".format(service_uuid)) history = {} - results = [] index = {} i = 0 - for endpoint in endpoints: + for endpoint in endpoints: device_uuid, endpoint_uuid = endpoint[0:2] # ignore topology_uuid by now - if device_uuid in history: + if device_uuid in history: try: matched_endpoint_uuid = history.pop(device_uuid) device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) @@ -222,14 +222,14 @@ class P4ServiceHandler(_ServiceHandler): raise Exception('Unable to find name of endpoint({:s})'.format(str(endpoint_uuid))) # One way - rule = create_rule_del(endpoint_a, endpoint_b) + rule = create_rule_del(endpoint_a, endpoint_b) device.device_config.config_rules.append(ConfigRule(**rule)) # The other way - rule = create_rule_del(endpoint_b, endpoint_a) + rule = create_rule_del(endpoint_b, endpoint_a) device.device_config.config_rules.append(ConfigRule(**rule)) self.__task_executor.configure_device(device) - + results.append(True) results[index[device_uuid]] = True except Exception as e: @@ -338,4 +338,4 @@ class P4ServiceHandler(_ServiceHandler): msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' LOGGER.warning(msg.format(str(resources))) - return [True for _ in range(len(resources))] \ No newline at end of file + return [True for _ in range(len(resources))] diff --git a/src/tests/p4-int-routing-acl/__init__.py b/src/service/service/service_handlers/p4_fabric_tna_acl/__init__.py similarity index 100% rename from src/tests/p4-int-routing-acl/__init__.py rename to src/service/service/service_handlers/p4_fabric_tna_acl/__init__.py diff --git a/src/service/service/service_handlers/p4_fabric_tna_acl/p4_fabric_tna_acl_config.py b/src/service/service/service_handlers/p4_fabric_tna_acl/p4_fabric_tna_acl_config.py new file mode 100644 index 0000000000000000000000000000000000000000..09dbcc5aaae818388bb2033d6953ab69cde8f098 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_acl/p4_fabric_tna_acl_config.py @@ -0,0 +1,39 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Common objects and methods for In-band Network Telemetry (INT) dataplane +based on the SD-Fabric dataplane model. +This dataplane covers both software based and hardware-based Stratum-enabled P4 switches, +such as the BMv2 software switch and Intel's Tofino/Tofino-2 switches. 
+ +SD-Fabric repo: https://github.com/stratum/fabric-tna +SD-Fabric docs: https://docs.sd-fabric.org/master/index.html +""" + +import logging + +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * + +LOGGER = logging.getLogger(__name__) + +# ACL service handler settings +ACL = "acl" +ACTION = "action" +ACTION_DROP = "drop" +ACTION_ALLOW = "allow" +ACTION_LIST = [ACTION_ALLOW, ACTION_DROP] + +def is_valid_acl_action(action : str) -> bool: + return action in ACTION_LIST diff --git a/src/service/service/service_handlers/p4_fabric_tna_acl/p4_fabric_tna_acl_service_handler.py b/src/service/service/service_handlers/p4_fabric_tna_acl/p4_fabric_tna_acl_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..b57086a29d36fccb06f103c9f0f6e3eda3148e45 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_acl/p4_fabric_tna_acl_service_handler.py @@ -0,0 +1,479 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Service handler for P4-based access control using the SD-Fabric P4 dataplane +for BMv2 and Intel Tofino switches. +""" + +import logging +from typing import Any, List, Dict, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigActionEnum, DeviceId, Service, Device +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type, chk_address_ipv4, chk_prefix_len_ipv4,\ + chk_transport_port +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +from .p4_fabric_tna_acl_config import * + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_fabric_tna_acl'}) + +class P4FabricACLServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings # type: ignore + ) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. 
+ + """ + self.__service_label = "P4 Access Control connectivity service" + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(self.__service.service_config, **settings) + + self._init_settings() + self._parse_settings() + self._print_settings() + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Provision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + try: + # Check if this port is part of the ACL configuration + _ = self._get_switch_port_in_port_map(device_name, port_id) + except Exception: + LOGGER.warning("Switch {} endpoint {} is not part of the ACL configuration".format(device_name, port_id)) + results.append(False) + continue + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_SET) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert ACL rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Installed {}/{} ACL rules on device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints from a list. 
+ Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Deprovision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + try: + # Check if this port is part of the ACL configuration + _ = self._get_switch_port_in_port_map(device_name, port_id) + except Exception: + LOGGER.warning("Switch {} endpoint {} is not part of the ACL configuration".format(device_name, port_id)) + results.append(False) + continue + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_DELETE) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert ACL rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Deleted {}/{} ACL rules from device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' 
+ LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key changes requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value containing + possible additionally required values to locate the value + to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' 
+ LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + def _init_settings(self): + self.__switch_info = {} + self.__port_map = {} + + try: + self.__settings = self.__settings_handler.get('/settings') + LOGGER.info("{} with settings: {}".format(self.__service_label, self.__settings)) + except Exception as ex: + LOGGER.error("Failed to retrieve service settings: {}".format(ex)) + raise Exception(ex) + + def _parse_settings(self): + try: + self.__switch_info = self.__settings.value[SWITCH_INFO] + except Exception as ex: + LOGGER.error("Failed to parse service settings: {}".format(ex)) + raise Exception(ex) + assert isinstance(self.__switch_info, dict), "Switch info object must be a map with switch names as keys" + + for switch_name, switch_info in self.__switch_info.items(): + assert switch_name, "Invalid P4 switch name" + assert isinstance(switch_info, dict), "Switch {} info must be a map with arch, dpid, and fwd_list items)" + assert switch_info[ARCH] in SUPPORTED_TARGET_ARCH_LIST, \ + "Switch {} - Supported P4 architectures are: {}".format(switch_name, ','.join(SUPPORTED_TARGET_ARCH_LIST)) + switch_dpid = switch_info[DPID] + assert switch_dpid > 0, "Switch {} - P4 switch dataplane ID must be a positive integer".format(switch_name, switch_info[DPID]) + + # Access Control list + acl = switch_info[ACL] + assert isinstance(acl, list), "Switch {} access control list must be a list with port_id, [ipv4_dst/src, trn_post_dst/src], and action items)" + for acl_entry in acl: + LOGGER.info("ACL entry: {}".format(acl_entry)) + port_id = acl_entry[PORT_ID] + assert port_id >= 0, "Switch {} - Invalid P4 switch port ID".format(switch_name) + + # Prepare the port map + if switch_name not in self.__port_map: + self.__port_map[switch_name] = {} + port_key = PORT_PREFIX + str(port_id) + if port_key not in self.__port_map[switch_name]: + self.__port_map[switch_name][port_key] = {} + self.__port_map[switch_name][port_key][PORT_ID] = port_id + if ACL not in self.__port_map[switch_name][port_key]: + self.__port_map[switch_name][port_key][ACL] = [] + + map_entry = {} + + ipv4_src = "" + if IPV4_SRC in acl_entry: + ipv4_src = acl_entry[IPV4_SRC] + assert chk_address_ipv4(ipv4_src), "Invalid source IPv4 address {}".format(ipv4_dst) + map_entry[IPV4_SRC] = ipv4_src + + ipv4_dst = "" + if IPV4_DST in acl_entry: + ipv4_dst = acl_entry[IPV4_DST] + assert chk_address_ipv4(ipv4_dst), "Invalid destination IPv4 address {}".format(ipv4_dst) + map_entry[IPV4_DST] = ipv4_dst + + ipv4_prefix_len = -1 + if ipv4_src or ipv4_dst: + ipv4_prefix_len = acl_entry[IPV4_PREFIX_LEN] + assert chk_prefix_len_ipv4(ipv4_prefix_len), "Invalid IPv4 address prefix length {}".format(ipv4_prefix_len) + map_entry[IPV4_PREFIX_LEN] = ipv4_prefix_len + + trn_port_src = -1 + if TRN_PORT_SRC in acl_entry: + trn_port_src = acl_entry[TRN_PORT_SRC] + assert chk_transport_port(trn_port_src), "Invalid source transport port" + map_entry[TRN_PORT_SRC] = trn_port_src + + trn_port_dst = -1 + if TRN_PORT_DST in acl_entry: + trn_port_dst = acl_entry[TRN_PORT_DST] + assert chk_transport_port(trn_port_dst), "Invalid destination transport port" + map_entry[TRN_PORT_DST] = trn_port_dst + + action = acl_entry[ACTION] + assert is_valid_acl_action(action), "Valid actions are: {}".format(','.join(ACTION_LIST)) + + # Retrieve entry from the port map + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + + # Add routing entry + switch_port_entry[ACL].append(map_entry) + + def _print_settings(self): + 
LOGGER.info("--------------- {} settings ---------------".format(self.__service.name)) + LOGGER.info("--- Topology info") + for switch_name, switch_info in self.__switch_info.items(): + LOGGER.info("\t Device {}".format(switch_name)) + LOGGER.info("\t\t| Target P4 architecture: {}".format(switch_info[ARCH])) + LOGGER.info("\t\t| Data plane ID: {}".format(switch_info[DPID])) + LOGGER.info("\t\t| Port map: {}".format(self.__port_map[switch_name])) + LOGGER.info("-------------------------------------------------------") + + def _get_switch_port_in_port_map(self, switch_name : str, port_id : int) -> Dict: + assert switch_name, "A valid switch name must be used as a key to the port map" + assert port_id > 0, "A valid switch port ID must be used as a key to a switch's port map" + switch_entry = self.__port_map[switch_name] + assert switch_entry, "Switch {} does not exist in the port map".format(switch_name) + port_key = PORT_PREFIX + str(port_id) + assert switch_entry[port_key], "Port with ID {} does not exist in the switch map".format(port_id) + + return switch_entry[port_key] + + def _get_acl_of_switch_port(self, switch_name : str, port_id : int) -> List [Tuple]: + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + return switch_port_entry[ACL] + + def _create_rules(self, device_obj : Device, port_id : int, action : ConfigActionEnum): # type: ignore + dev_name = device_obj.name + + rules = [] + + ### ACL rules + acl = self._get_acl_of_switch_port(switch_name=dev_name, port_id=port_id) + for acl_entry in acl: + if IPV4_SRC in acl_entry: + rules += rules_set_up_acl_filter_host( + ingress_port=port_id, + ip_address=acl_entry[IPV4_SRC], + prefix_len=acl_entry[IPV4_PREFIX_LEN], + ip_direction="src", + action=action + ) + if IPV4_DST in acl_entry: + rules += rules_set_up_acl_filter_host( + ingress_port=port_id, + ip_address=acl_entry[IPV4_DST], + prefix_len=acl_entry[IPV4_PREFIX_LEN], + ip_direction="dst", + action=action + ) + if TRN_PORT_SRC in acl_entry: + rules += rules_set_up_acl_filter_port( + ingress_port=port_id, + transport_port=acl_entry[TRN_PORT_SRC], + transport_direction="src", + action=action + ) + if TRN_PORT_DST in acl_entry: + rules += rules_set_up_acl_filter_port( + ingress_port=port_id, + transport_port=acl_entry[TRN_PORT_DST], + transport_direction="dst", + action=action + ) + + return rules diff --git a/src/service/service/service_handlers/p4_fabric_tna_commons/p4_fabric_tna_commons.py b/src/service/service/service_handlers/p4_fabric_tna_commons/p4_fabric_tna_commons.py new file mode 100644 index 0000000000000000000000000000000000000000..a97bce07fb77cfda222eb2485018b944e6569f4c --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_commons/p4_fabric_tna_commons.py @@ -0,0 +1,960 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Common objects and methods for the SD-Fabric (fabric TNA) dataplane. 
+This dataplane covers both software based and hardware-based Stratum-enabled P4 switches, +such as the BMv2 software switch and Intel's Tofino/Tofino-2 switches. + +SD-Fabric repo: https://github.com/stratum/fabric-tna +SD-Fabric docs: https://docs.sd-fabric.org/master/index.html +""" + +import time +import logging +import struct +from random import randint +from typing import List, Tuple +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, Device, EndPoint +from common.tools.object_factory.ConfigRule import json_config_rule +from common.type_checkers.Checkers import chk_address_mac, chk_vlan_id, \ + chk_address_ipv4, chk_prefix_len_ipv4, chk_transport_port +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +# Common service handler settings +SWITCH_INFO = "switch_info" +ARCH = "arch" +DPID = "dpid" +MAC = "mac" +IP = "ip" +PORT = "port" # Dataplane port +PORT_ID = "port_id" +PORT_TYPE = "port_type" +VLAN_ID = "vlan_id" +RECIRCULATION_PORT_LIST = "recirculation_port_list" +PORT_LIST = "port_list" +PORT_PREFIX = "port-" +ROUTING_LIST = "routing_list" +MAC_SRC = "mac_src" +MAC_DST = "mac_dst" +IPV4_SRC = "ipv4_src" +IPV4_DST = "ipv4_dst" +IPV4_PREFIX_LEN = "ipv4_prefix_len" +TRN_PORT_SRC = "trn_port_src" # Transport network port (TCP, UDP) +TRN_PORT_DST = "trn_port_dst" + +# P4 architectures +TARGET_ARCH_TNA = "tna" +TARGET_ARCH_V1MODEL = "v1model" +SUPPORTED_TARGET_ARCH_LIST = [TARGET_ARCH_TNA, TARGET_ARCH_V1MODEL] + +# Recirculation ports for various targets +RECIRCULATION_PORTS_TNA = [68, 196, 324, 452] # Tofino-2 (2-pipe switches use only the first 2 entries) +RECIRCULATION_PORTS_V1MODEL = [510] # Variable FAKE_V1MODEL_RECIRC_PORT in p4 source program + +# P4 tables +TABLE_INGRESS_VLAN = "FabricIngress.filtering.ingress_port_vlan" +TABLE_EGRESS_VLAN = "FabricEgress.egress_next.egress_vlan" +TABLE_FWD_CLASSIFIER = "FabricIngress.filtering.fwd_classifier" +TABLE_BRIDGING = "FabricIngress.forwarding.bridging" +TABLE_ROUTING_V4 = "FabricIngress.forwarding.routing_v4" +TABLE_NEXT_SIMPLE = "FabricIngress.next.simple" +TABLE_NEXT_HASHED = "FabricIngress.next.hashed" +TABLE_ACL = "FabricIngress.acl.acl" + +# Action profile members +ACTION_PROFILE_NEXT_HASHED = "FabricIngress.next.hashed_profile" + +# Clone sessions +CLONE_SESSION = "/clone_sessions/clone_session" + +# Forwarding types +FORWARDING_TYPE_BRIDGING = 0 +FORWARDING_TYPE_MPLS = 1 +FORWARDING_TYPE_UNICAST_IPV4 = 2 +FORWARDING_TYPE_IPV4_MULTICAST = 3 +FORWARDING_TYPE_IPV6_UNICAST = 4 +FORWARDING_TYPE_IPV6_MULTICAST = 5 +FORWARDING_TYPE_UNKNOWN = 7 + +FORWARDING_TYPES_VALID = [ + FORWARDING_TYPE_BRIDGING, + FORWARDING_TYPE_MPLS, + FORWARDING_TYPE_UNICAST_IPV4, + FORWARDING_TYPE_IPV4_MULTICAST, + FORWARDING_TYPE_IPV6_UNICAST, + FORWARDING_TYPE_IPV6_MULTICAST, + FORWARDING_TYPE_UNKNOWN +] + +# Port types +PORT_TYPE_INT = "int" +PORT_TYPE_HOST = "host" +PORT_TYPE_SWITCH = "switch" + +PORT_TYPE_ACTION_EDGE = 1 +PORT_TYPE_ACTION_INFRA = 2 +PORT_TYPE_ACTION_INTERNAL = 3 + +PORT_TYPE_MAP = { + PORT_TYPE_INT: PORT_TYPE_ACTION_INTERNAL, + PORT_TYPE_HOST: PORT_TYPE_ACTION_EDGE, + PORT_TYPE_SWITCH: PORT_TYPE_ACTION_INFRA +} + +PORT_TYPES_STR_VALID = [PORT_TYPE_INT, PORT_TYPE_HOST, PORT_TYPE_SWITCH] +PORT_TYPES_INT_VALID = [PORT_TYPE_ACTION_EDGE, PORT_TYPE_ACTION_INFRA, PORT_TYPE_ACTION_INTERNAL] + +# Bridged metadata type +BRIDGED_MD_TYPE_EGRESS_MIRROR = 2 +BRIDGED_MD_TYPE_INGRESS_MIRROR = 3 +BRIDGED_MD_TYPE_INT_INGRESS_DROP = 4 +BRIDGED_MD_TYPE_DEFLECTED = 5 + +# Mirror 
types
+MIRROR_TYPE_INVALID = 0
+MIRROR_TYPE_INT_REPORT = 1
+
+# VLAN
+VLAN_DEF = 4094
+
+# Supported Ethernet types
+ETHER_TYPE_IPV4 = "0x0800"
+ETHER_TYPE_IPV6 = "0x86DD"
+
+# Member ID
+NEXT_MEMBER_ID = 1
+
+# Time interval in seconds for consecutive rule management (insert/delete) operations
+RULE_CONF_INTERVAL_SEC = 0.1
+
+################################################################################################################
+### Miscellaneous methods
+################################################################################################################
+
+def arch_tna(arch : str) -> bool:
+    return arch == TARGET_ARCH_TNA
+
+def arch_v1model(arch : str) -> bool:
+    return not arch_tna(arch)
+
+def generate_random_mac() -> str:
+    # Draw each octet independently; otherwise all six octets share the same random value
+    mac = [randint(0x00, 0xff) for _ in range(6)]
+    mac_str = ':'.join(map(lambda x: "%02x" % x, mac))
+    assert chk_address_mac(mac_str), "Invalid MAC address generated"
+
+    return mac_str
+
+def prefix_to_hex_mask(prefix_len : int) -> str:
+    # Calculate the binary mask
+    binary_mask = (1 << 32) - (1 << (32 - prefix_len))
+
+    # Convert the binary mask to the 4 octets (32 bits)
+    mask = struct.pack('!I', binary_mask)
+
+    # Convert to a string of hex values
+    hex_mask = ''.join(f'{byte:02x}' for byte in mask)
+
+    return "0x"+hex_mask.upper()
+
+def sleep_for(time_sec : int) -> None:
+    assert time_sec > 0, "Invalid sleep period in seconds"
+    time.sleep(time_sec)
+
+def find_port_id_in_endpoint(endpoint : EndPoint, target_endpoint_uuid : str) -> int: # type: ignore
+    assert endpoint, "Invalid device endpoint"
+    endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+    assert endpoint_uuid, "Invalid device endpoint UUID"
+    if endpoint_uuid == target_endpoint_uuid:
+        try:
+            dpid = int(endpoint.name) # P4 devices have integer dataplane port IDs
+            assert dpid > 0, "Invalid device endpoint DPID"
+        except Exception as ex:
+            LOGGER.error(ex)
+            return -1
+        return dpid
+
+    return -1
+
+def find_port_id_in_endpoint_list(endpoint_list : List, target_endpoint_uuid : str) -> int:
+    assert endpoint_list, "Invalid device endpoint list"
+    for endpoint in endpoint_list:
+        result = find_port_id_in_endpoint(endpoint, target_endpoint_uuid)
+        if result != -1:
+            return result
+
+    return -1
+
+################################################################################################################
+### Rule generation methods
+################################################################################################################
+
+###################################
+### A. 
Port setup +################################### + +def rules_set_up_port_ingress( + ingress_port : int, + port_type : str, + vlan_id: int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert ingress_port >= 0, "Invalid ingress port to configure ingress port" + assert port_type.lower() in PORT_TYPES_STR_VALID, "Invalid port type to configure ingress port" + assert chk_vlan_id(vlan_id), "Invalid VLAN ID to configure ingress port" + + # VLAN support if 1 + vlan_is_valid = 1 if vlan_id != VLAN_DEF else 0 + + rule_no = cache_rule(TABLE_INGRESS_VLAN, action) + + port_type_int = PORT_TYPE_MAP[port_type.lower()] + assert port_type_int in PORT_TYPES_INT_VALID, "Invalid port type to configure ingress filtering" + + rules_filtering_vlan_ingress = [] + rules_filtering_vlan_ingress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INGRESS_VLAN+'['+str(rule_no)+']', + { + 'table-name': TABLE_INGRESS_VLAN, + 'match-fields': [ + { + 'match-field': 'ig_port', + 'match-value': str(ingress_port) + }, + { + 'match-field': 'vlan_is_valid', + 'match-value': str(vlan_is_valid) + } + ], + 'action-name': 'FabricIngress.filtering.permit_with_internal_vlan', + 'action-params': [ + { + 'action-param': 'port_type', + 'action-value': str(port_type_int) + }, + { + 'action-param': 'vlan_id', + 'action-value': str(vlan_id) + } + ], + 'priority': 10 + } + ) + ) + + return rules_filtering_vlan_ingress + +def rules_set_up_port_egress( + egress_port : int, + vlan_id: int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert egress_port >= 0, "Invalid egress port to configure egress vlan" + assert chk_vlan_id(vlan_id), "Invalid VLAN ID to configure egress vlan" + + rule_no = cache_rule(TABLE_EGRESS_VLAN, action) + + rules_vlan_egress = [] + rules_vlan_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_EGRESS_VLAN+'['+str(rule_no)+']', + { + 'table-name': TABLE_EGRESS_VLAN, + 'match-fields': [ + { + 'match-field': 'eg_port', + 'match-value': str(egress_port) + }, + { + 'match-field': 'vlan_id', + 'match-value': str(vlan_id) + } + ], + 'action-name': 'FabricEgress.egress_next.pop_vlan', + 'action-params': [] + } + ) + ) + + return rules_vlan_egress + +def rules_set_up_fwd_classifier( + ingress_port : int, + fwd_type : int, + eth_type: str, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert ingress_port >= 0, "Invalid ingress port to configure forwarding classifier" + assert fwd_type in FORWARDING_TYPES_VALID, "Invalid forwarding type to configure forwarding classifier" + + rule_no = cache_rule(TABLE_FWD_CLASSIFIER, action) + + rules_filtering_fwd_classifier = [] + rules_filtering_fwd_classifier.append( + json_config_rule( + action, + '/tables/table/'+TABLE_FWD_CLASSIFIER+'['+str(rule_no)+']', + { + 'table-name': TABLE_FWD_CLASSIFIER, + 'match-fields': [ + { + 'match-field': 'ig_port', + 'match-value': str(ingress_port) + }, + { + 'match-field': 'ip_eth_type', + 'match-value': eth_type + } + ], + 'action-name': 'FabricIngress.filtering.set_forwarding_type', + 'action-params': [ + { + 'action-param': 'fwd_type', + 'action-value': str(fwd_type) + }, + ], + 'priority': 1 + } + ) + ) + + return rules_filtering_fwd_classifier + +def rules_set_up_port( + port : int, + port_type : str, + fwd_type : int, + vlan_id : int, + action : ConfigActionEnum, # type: ignore + eth_type=ETHER_TYPE_IPV4) -> List [Tuple]: + rules_list = [] + + rules_list.extend( + rules_set_up_port_ingress( + ingress_port=port, + port_type=port_type, + vlan_id=vlan_id, + 
action=action + ) + ) + rules_list.extend( + rules_set_up_fwd_classifier( + ingress_port=port, + fwd_type=fwd_type, + eth_type=eth_type, + action=action + ) + ) + rules_list.extend( + rules_set_up_port_egress( + egress_port=port, + vlan_id=vlan_id, + action=action + ) + ) + LOGGER.debug("Port configured:{}".format(port)) + + return rules_list + +################################### +### A. End of port setup +################################### + + +################################### +### B. L2 setup +################################### + +def rules_set_up_fwd_bridging( + vlan_id: int, + eth_dst : str, + egress_port : int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert chk_vlan_id(vlan_id), "Invalid VLAN ID to configure bridging" + assert chk_address_mac(eth_dst), "Invalid destination Ethernet address to configure bridging" + assert egress_port >= 0, "Invalid outport to configure bridging" + + rule_no = cache_rule(TABLE_BRIDGING, action) + + rules_fwd_bridging = [] + rules_fwd_bridging.append( + json_config_rule( + action, + '/tables/table/'+TABLE_BRIDGING+'['+str(rule_no)+']', + { + 'table-name': TABLE_BRIDGING, + 'match-fields': [ + { + 'match-field': 'vlan_id', + 'match-value': str(vlan_id) + }, + { + 'match-field': 'eth_dst', + 'match-value': eth_dst + } + ], + 'action-name': 'FabricIngress.forwarding.set_next_id_bridging', + 'action-params': [ + { + 'action-param': 'next_id', + 'action-value': str(egress_port) + } + ], + 'priority': 1 + } + ) + ) + + return rules_fwd_bridging + +def rules_set_up_next_output_simple( + egress_port : int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert egress_port >= 0, "Invalid outport to configure next output simple" + + rule_no = cache_rule(TABLE_NEXT_SIMPLE, action) + + rules_next_output_simple = [] + rules_next_output_simple.append( + json_config_rule( + action, + '/tables/table/'+TABLE_NEXT_SIMPLE+'['+str(rule_no)+']', + { + 'table-name': TABLE_NEXT_SIMPLE, + 'match-fields': [ + { + 'match-field': 'next_id', + 'match-value': str(egress_port) + } + ], + 'action-name': 'FabricIngress.next.output_simple', + 'action-params': [ + { + 'action-param': 'port_num', + 'action-value': str(egress_port) + } + ] + } + ) + ) + + return rules_next_output_simple + +def rules_set_up_next_output_hashed( + egress_port : int, + action : ConfigActionEnum, # type: ignore + next_id = None) -> List [Tuple]: + assert egress_port >= 0, "Invalid outport to configure next output hashed" + + if next_id is None: + next_id = egress_port + + global NEXT_MEMBER_ID + + rule_no = cache_rule(ACTION_PROFILE_NEXT_HASHED, action) + + rules_next_output_hashed = [] + rules_next_output_hashed.append( + json_config_rule( + action, + '/action_profiles/action_profile/'+ACTION_PROFILE_NEXT_HASHED+'['+str(rule_no)+']', + { + 'action-profile-name': ACTION_PROFILE_NEXT_HASHED, + 'member-id': NEXT_MEMBER_ID, + 'action-name': 'FabricIngress.next.output_hashed', + 'action-params': [ + { + 'action-param': 'port_num', + 'action-value': str(egress_port) + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_NEXT_HASHED, action) + + rules_next_output_hashed.append( + json_config_rule( + action, + '/tables/table/'+TABLE_NEXT_HASHED+'['+str(rule_no)+']', + { + 'table-name': TABLE_NEXT_HASHED, + 'member-id': NEXT_MEMBER_ID, + 'match-fields': [ + { + 'match-field': 'next_id', + 'match-value': str(next_id) + } + ] + } + ) + ) + + NEXT_MEMBER_ID += 1 + + return rules_next_output_hashed + +################################### +### B. 
End of L2 setup +################################### + + +################################### +### C. L3 setup +################################### + +def rules_set_up_routing( + ipv4_dst : str, + ipv4_prefix_len : int, + egress_port : int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert chk_address_ipv4(ipv4_dst), "Invalid destination IPv4 address to configure routing" + assert chk_prefix_len_ipv4(ipv4_prefix_len), "Invalid IPv4 prefix length" + assert egress_port >= 0, "Invalid outport to configure routing" + + rule_no = cache_rule(TABLE_ROUTING_V4, action) + + rules_routing = [] + rules_routing.append( + json_config_rule( + action, + '/tables/table/'+TABLE_ROUTING_V4+'['+str(rule_no)+']', + { + 'table-name': TABLE_ROUTING_V4, + 'match-fields': [ + { + 'match-field': 'ipv4_dst', + 'match-value': ipv4_dst + "/" + str(ipv4_prefix_len) + } + ], + 'action-name': 'FabricIngress.forwarding.set_next_id_routing_v4', + 'action-params': [ + { + 'action-param': 'next_id', + 'action-value': str(egress_port) + } + ] + } + ) + ) + + return rules_routing + +def rules_set_up_next_routing_simple( + egress_port : int, + eth_src : str, + eth_dst : str, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert egress_port >= 0, "Invalid outport to configure next routing simple" + assert chk_address_mac(eth_src), "Invalid source Ethernet address to configure next routing simple" + assert chk_address_mac(eth_dst), "Invalid destination Ethernet address to configure next routing simple" + + rule_no = cache_rule(TABLE_NEXT_SIMPLE, action) + + rules_next_routing_simple = [] + rules_next_routing_simple.append( + json_config_rule( + action, + '/tables/table/'+TABLE_NEXT_SIMPLE+'['+str(rule_no)+']', + { + 'table-name': TABLE_NEXT_SIMPLE, + 'match-fields': [ + { + 'match-field': 'next_id', + 'match-value': str(egress_port) + } + ], + 'action-name': 'FabricIngress.next.routing_simple', + 'action-params': [ + { + 'action-param': 'port_num', + 'action-value': str(egress_port) + }, + { + 'action-param': 'smac', + 'action-value': eth_src + }, + { + 'action-param': 'dmac', + 'action-value': eth_dst + } + ] + } + ) + ) + + return rules_next_routing_simple + +def rules_set_up_next_routing_hashed( + egress_port : int, + action : ConfigActionEnum, # type: ignore + next_id = None) -> List [Tuple]: + assert egress_port >= 0, "Invalid outport to configure next routing hashed" + random_mac_src = generate_random_mac() + random_mac_dst = generate_random_mac() + if next_id is None: + next_id = egress_port + + global NEXT_MEMBER_ID + + rule_no = cache_rule(ACTION_PROFILE_NEXT_HASHED, action) + + rules_next_routing_hashed = [] + rules_next_routing_hashed.append( + json_config_rule( + action, + '/action_profiles/action_profile/'+ACTION_PROFILE_NEXT_HASHED+'['+str(rule_no)+']', + { + 'action-profile-name': ACTION_PROFILE_NEXT_HASHED, + 'member-id': NEXT_MEMBER_ID, + 'action-name': 'FabricIngress.next.routing_hashed', + 'action-params': [ + { + 'action-param': 'port_num', + 'action-value': str(egress_port) + }, + { + 'action-param': 'smac', + 'action-value': random_mac_src + }, + { + 'action-param': 'dmac', + 'action-value': random_mac_dst + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_NEXT_HASHED, action) + + rules_next_routing_hashed.append( + json_config_rule( + action, + '/tables/table/'+TABLE_NEXT_HASHED+'['+str(rule_no)+']', + { + 'table-name': TABLE_NEXT_HASHED, + 'member-id': NEXT_MEMBER_ID, + 'match-fields': [ + { + 'match-field': 'next_id', + 'match-value': str(next_id) + } + ] + } + ) + ) + 
+ return rules_next_routing_hashed + +################################### +### C. End of L3 setup +################################### + + +################################### +### D. Flow mirroring +################################### + +def rules_set_up_report_mirror_flow( + recirculation_port_list : List, + report_mirror_id_list : List, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + rules_list = [] + + for i, mirror_id in enumerate(report_mirror_id_list): + LOGGER.debug("Mirror ID:{} - Recirculation port: {}".format( + mirror_id, recirculation_port_list[i])) + rules_list.extend( + rules_set_up_clone_session( + session_id=mirror_id, + egress_port=recirculation_port_list[i], + instance=0, + action=action + ) + ) + + return rules_list + +def rules_set_up_clone_session( + session_id : int, + egress_port : int, + instance : int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert session_id >= 0, "Invalid session identifier to configure clone session" + assert egress_port >= 0, "Invalid egress port number to configure clone session" + assert instance >= 0, "Invalid instance number to configure clone session" + + rule_no = cache_rule(CLONE_SESSION, action) + + #TODO: For TNA pass also: packet_length_bytes = 128 + packet_length_bytes = 128 + + rules_clone_session = [] + + rules_clone_session.append( + json_config_rule( + action, + CLONE_SESSION+'['+str(rule_no)+']', + { + 'session-id': session_id, + 'replicas': [ + { + 'egress-port': egress_port, + 'instance': instance + } + ] + } + ) + ) + + return rules_clone_session + +################################### +### D. End of flow mirroring +################################### + + +################################### +### E. Access Control Lists +################################### + +def rules_set_up_acl_filter_host( + ingress_port : int, + ip_address : str, + prefix_len : int, + ip_direction : str, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert ingress_port >= 0, "Invalid ingress port to configure ACL" + assert chk_address_ipv4(ip_address), "Invalid IP address to configure ACL" + assert 0 < prefix_len <= 32, "Invalid IP address prefix length to configure ACL" + + ip_match = "ipv4_src" if ip_direction == "src" else "ipv4_dst" + + prefix_len_hex = prefix_to_hex_mask(prefix_len) + + rule_no = cache_rule(TABLE_ACL, action) + + rules_acl = [] + rules_acl.append( + json_config_rule( + action, + '/tables/table/'+TABLE_ACL+'['+str(rule_no)+']', + { + 'table-name': TABLE_ACL, + 'match-fields': [ + { + 'match-field': 'ig_port', + 'match-value': str(ingress_port) + }, + { + 'match-field': ip_match, + 'match-value': '%s&&&%s' % (ip_address, prefix_len_hex) + } + ], + 'action-name': 'FabricIngress.acl.drop', + 'action-params': [], + 'priority': 1 + } + ) + ) + + return rules_acl + +def rules_set_up_acl_filter_port( + ingress_port : int, + transport_port : int, + transport_direction : str, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert ingress_port >= 0, "Invalid ingress port to configure ACL" + assert chk_transport_port(transport_port), "Invalid transport port to configure ACL" + + trn_match = "l4_sport" if transport_direction == "src" else "l4_dport" + + rule_no = cache_rule(TABLE_ACL, action) + + rules_acl = [] + rules_acl.append( + json_config_rule( + action, + '/tables/table/'+TABLE_ACL+'['+str(rule_no)+']', + { + 'table-name': TABLE_ACL, + 'match-fields': [ + { + 'match-field': 'ig_port', + 'match-value': str(ingress_port) + }, + { + 'match-field': trn_match, + 
'match-value': str(transport_port) + } + ], + 'action-name': 'FabricIngress.acl.drop', + 'action-params': [], + 'priority': 1 + } + ) + ) + + return rules_acl + +########################################### +### E. End of Access Control Lists +########################################### + +################################################################################################################ +### Rule management methods +################################################################################################################ + +def apply_rules( + task_executor : TaskExecutor, + device_obj : Device, # type: ignore + json_config_rules : List): # type: ignore + applied_rules = 0 + failed_rules = 0 + total_rules = len(json_config_rules) + assert device_obj, "Cannot apply rules to invalid device object" + + if total_rules == 0: + return applied_rules, failed_rules + + # Provision rules one-by-one + for i, json_config_rule in enumerate(json_config_rules): + LOGGER.debug("Applying rule #{}: {}".format(i, json_config_rule)) + try: + # Cleanup the rules of this particular object + del device_obj.device_config.config_rules[:] + + # Add the new rule to apply + device_obj.device_config.config_rules.append(ConfigRule(**json_config_rule)) + + # Configure the device via the SBI + # TODO: Acquire status of this RPC to ensure that the rule is actually applied + task_executor.configure_device(device_obj) + + # Sleep for some time till the next operation + sleep_for(RULE_CONF_INTERVAL_SEC) + + applied_rules += 1 + except Exception as ex: + LOGGER.error("Error while applying rule #{}: {}".format(i, ex)) + failed_rules += 1 + raise Exception(ex) + + LOGGER.debug("Batch rules: {}/{} applied".format(applied_rules, total_rules)) + + return applied_rules, failed_rules + +# Map for keeping rule counts per table +RULE_ENTRY_MAP = {} + +def cache_rule( + table_name : str, + action : ConfigActionEnum) -> int: # type: ignore + rule_no = -1 + + if action == ConfigActionEnum.CONFIGACTION_SET: + rule_no = add_rule_to_map(table_name) + elif action == ConfigActionEnum.CONFIGACTION_DELETE: + rule_no = delete_rule_from_map(table_name) + else: + assert True, "Invalid rule configuration action" + + assert rule_no > 0, "Invalid rule identifier to configure table {}".format(table_name) + + return rule_no + +def add_rule_to_map(table_name : str) -> int: + if table_name not in RULE_ENTRY_MAP: + RULE_ENTRY_MAP[table_name] = [] + + # Current number of rules + rules_no = len(RULE_ENTRY_MAP[table_name]) + + # Get a new valid rule index + new_index = find_minimum_available_rule_index(RULE_ENTRY_MAP[table_name]) + LOGGER.debug("Minimum available rule index for table {} is: {}".format(table_name, new_index)) + assert new_index > 0, "Invalid rule index for table {}".format(table_name) + + # New entry + new_rule_entry = table_name+"["+str(new_index)+"]" + + # Add entry to the list + RULE_ENTRY_MAP[table_name].append(new_rule_entry) + assert len(RULE_ENTRY_MAP[table_name]) == rules_no + 1 + + return new_index + +def delete_rule_from_map(table_name : str) -> int: + if table_name not in RULE_ENTRY_MAP: + LOGGER.error("Table {} has no entries".format(table_name)) + return -1 + + # Current number of rules + rules_no = len(RULE_ENTRY_MAP[table_name]) + + # Remove last rule + rule_entry = RULE_ENTRY_MAP[table_name].pop() + # Get its index + rule_no = int(rule_entry.split('[')[1].split(']')[0]) + + assert len(RULE_ENTRY_MAP[table_name]) == rules_no - 1 + + # Return the index of the removed rule + return rule_no + +def 
string_contains_number(input_string : str, target_number : int) -> bool:
+    # Match the exact bracketed rule index (e.g., index 1 must not match "table[11]")
+    return '[' + str(target_number) + ']' in input_string
+
+def rule_index_exists(rule_entry_list : List, target_rule_index : int) -> bool:
+    # Rule indices start from 1
+    if target_rule_index <= 0:
+        return False
+
+    rules_no = len(rule_entry_list)
+    if rules_no == 0:
+        return False
+
+    for rule in rule_entry_list:
+        if string_contains_number(rule, target_rule_index):
+            return True
+
+    return False
+
+def find_minimum_available_rule_index(rule_entry_list : List) -> int:
+    rules_no = len(rule_entry_list)
+    if rules_no == 0:
+        return 1
+
+    # Return the smallest index in [1, rules_no] that is not present in the rule list
+    for index in range(1, rules_no + 1):
+        if not rule_index_exists(rule_entry_list, index):
+            return index
+
+    # All of the existing rule indices are taken, proceed to the next one
+    return rules_no + 1
+
+def print_rule_map() -> None:
+    for k in RULE_ENTRY_MAP.keys():
+        LOGGER.info("Table {} entries: {}".format(k, RULE_ENTRY_MAP[k]))
diff --git a/src/service/service/service_handlers/p4_fabric_tna_int/__init__.py b/src/service/service/service_handlers/p4_fabric_tna_int/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2
--- /dev/null
+++ b/src/service/service/service_handlers/p4_fabric_tna_int/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_config.py b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fb22ee97d80ec03e7eca87e29772cc71141b1ff
--- /dev/null
+++ b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_config.py
@@ -0,0 +1,425 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Common objects and methods for the In-band Network Telemetry (INT) dataplane
+based on the SD-Fabric dataplane model.
+This dataplane covers both software-based and hardware-based Stratum-enabled P4 switches,
+such as the BMv2 software switch and Intel's Tofino/Tofino-2 switches.
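+
+A minimal usage sketch (illustrative only; it assumes the rule builders defined
+below in this module and the shared apply_rules()/cache_rule() machinery, and is
+not a normative example):
+
+    from common.proto.context_pb2 import ConfigActionEnum
+
+    # Build the "mark all valid IPv4 traffic for reporting" watchlist entry.
+    # Each returned item is later unpacked into a ConfigRule (see apply_rules()).
+    rules = rules_set_up_int_watchlist(action=ConfigActionEnum.CONFIGACTION_SET)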
+ +SD-Fabric repo: https://github.com/stratum/fabric-tna +SD-Fabric docs: https://docs.sd-fabric.org/master/index.html +""" + +import logging +from typing import List, Tuple +from common.proto.context_pb2 import ConfigActionEnum +from common.tools.object_factory.ConfigRule import json_config_rule +from common.type_checkers.Checkers import chk_address_ipv4, chk_transport_port + +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * + +LOGGER = logging.getLogger(__name__) + +# INT service handler settings +INT_COLLECTOR_INFO = "int_collector_info" +INT_REPORT_MIRROR_ID_LIST = "int_report_mirror_id_list" +PORT_INT = "int_port" # In-band Network Telemetry transport port (of the collector) + +# INT tables +TABLE_INT_WATCHLIST = "FabricIngress.int_watchlist.watchlist" +TABLE_INT_EGRESS_REPORT = "FabricEgress.int_egress.report" + +# Mirror IDs for INT reports +INT_REPORT_MIRROR_ID_LIST_TNA = [0x200, 0x201, 0x202, 0x203] # Tofino-2 (2-pipe Tofino switches use only the first 2 entries) +INT_REPORT_MIRROR_ID_LIST_V1MODEL = [0x1FA] # Variable V1MODEL_INT_MIRROR_SESSION in p4 source program + +# INT report types +INT_REPORT_TYPE_NO_REPORT = 0 +INT_REPORT_TYPE_FLOW = 1 +INT_REPORT_TYPE_QUEUE = 2 +INT_REPORT_TYPE_DROP = 4 + + +def rules_set_up_int_watchlist(action : ConfigActionEnum) -> List [Tuple]: # type: ignore + rule_no = cache_rule(TABLE_INT_WATCHLIST, action) + + rules_int_watchlist = [] + rules_int_watchlist.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_WATCHLIST+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_WATCHLIST, + 'match-fields': [ + { + 'match-field': 'ipv4_valid', + 'match-value': '1' + } + ], + 'action-name': 'FabricIngress.int_watchlist.mark_to_report', + 'action-params': [], + 'priority': 1 + } + ) + ) + + return rules_int_watchlist + +def rules_set_up_int_report_collector( + int_collector_ip : str, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert chk_address_ipv4(int_collector_ip), "Invalid INT collector IPv4 address to configure watchlist" + + rule_no = cache_rule(TABLE_INT_WATCHLIST, action) + + rules_int_col_report = [] + rules_int_col_report.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_WATCHLIST+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_WATCHLIST, + 'match-fields': [ + { + 'match-field': 'ipv4_valid', + 'match-value': '1' + }, + { + 'match-field': 'ipv4_dst', + 'match-value': int_collector_ip+'&&&0xFFFFFFFF' + } + ], + 'action-name': 'FabricIngress.int_watchlist.no_report_collector', + 'action-params': [], + 'priority': 10 + } + ) + ) + + return rules_int_col_report + +def rules_set_up_int_recirculation_ports( + recirculation_port_list : List, + port_type : str, + fwd_type : int, + vlan_id : int, + action : ConfigActionEnum): # type: ignore + rules_list = [] + + for port in recirculation_port_list: + rules_list.extend( + rules_set_up_port( + port=port, + port_type=port_type, + fwd_type=fwd_type, + vlan_id=vlan_id, + action=action + ) + ) + + LOGGER.debug("INT recirculation ports configured:{}".format(recirculation_port_list)) + + return rules_list + +def rules_set_up_int_report_flow( + switch_id : int, + src_ip : str, + int_collector_ip : str, + int_collector_port : int, + action : ConfigActionEnum) -> List [Tuple]: # type: ignore + assert switch_id > 0, "Invalid switch identifier to configure egress INT report" + assert chk_address_ipv4(src_ip), "Invalid source IPv4 address to configure egress INT report" + assert chk_address_ipv4(int_collector_ip), "Invalid INT 
collector IPv4 address to configure egress INT report" + assert chk_transport_port(int_collector_port), "Invalid INT collector port number to configure egress INT report" + + rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action) + + rules_int_egress = [] + + # Rule #1 + rules_int_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_EGRESS_REPORT, + 'match-fields': [ + { + 'match-field': 'bmd_type', + 'match-value': str(BRIDGED_MD_TYPE_INT_INGRESS_DROP) + }, + { + 'match-field': 'mirror_type', + 'match-value': str(MIRROR_TYPE_INVALID) + }, + { + 'match-field': 'int_report_type', + 'match-value': str(INT_REPORT_TYPE_DROP) + } + ], + 'action-name': 'FabricEgress.int_egress.do_drop_report_encap', + 'action-params': [ + { + 'action-param': 'switch_id', + 'action-value': str(switch_id) + }, + { + 'action-param': 'src_ip', + 'action-value': src_ip + }, + { + 'action-param': 'mon_ip', + 'action-value': int_collector_ip + }, + { + 'action-param': 'mon_port', + 'action-value': str(int_collector_port) + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action) + + # Rule #2 + rules_int_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_EGRESS_REPORT, + 'match-fields': [ + { + 'match-field': 'bmd_type', + 'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR) + }, + { + 'match-field': 'mirror_type', + 'match-value': str(MIRROR_TYPE_INT_REPORT) + }, + { + 'match-field': 'int_report_type', + 'match-value': str(INT_REPORT_TYPE_DROP) + } + ], + 'action-name': 'FabricEgress.int_egress.do_drop_report_encap', + 'action-params': [ + { + 'action-param': 'switch_id', + 'action-value': str(switch_id) + }, + { + 'action-param': 'src_ip', + 'action-value': src_ip + }, + { + 'action-param': 'mon_ip', + 'action-value': int_collector_ip + }, + { + 'action-param': 'mon_port', + 'action-value': str(int_collector_port) + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action) + + # Rule #3 + rules_int_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_EGRESS_REPORT, + 'match-fields': [ + { + 'match-field': 'bmd_type', + 'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR) + }, + { + 'match-field': 'mirror_type', + 'match-value': str(MIRROR_TYPE_INT_REPORT) + }, + { + 'match-field': 'int_report_type', + 'match-value': str(INT_REPORT_TYPE_FLOW) + } + ], + 'action-name': 'FabricEgress.int_egress.do_local_report_encap', + 'action-params': [ + { + 'action-param': 'switch_id', + 'action-value': str(switch_id) + }, + { + 'action-param': 'src_ip', + 'action-value': src_ip + }, + { + 'action-param': 'mon_ip', + 'action-value': int_collector_ip + }, + { + 'action-param': 'mon_port', + 'action-value': str(int_collector_port) + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action) + + # Rule #4 + rules_int_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_EGRESS_REPORT, + 'match-fields': [ + { + 'match-field': 'bmd_type', + 'match-value': str(BRIDGED_MD_TYPE_DEFLECTED) + }, + { + 'match-field': 'mirror_type', + 'match-value': str(MIRROR_TYPE_INVALID) + }, + { + 'match-field': 'int_report_type', + 'match-value': str(INT_REPORT_TYPE_DROP) + } + ], + 'action-name': 'FabricEgress.int_egress.do_drop_report_encap', + 'action-params': [ + 
{ + 'action-param': 'switch_id', + 'action-value': str(switch_id) + }, + { + 'action-param': 'src_ip', + 'action-value': src_ip + }, + { + 'action-param': 'mon_ip', + 'action-value': int_collector_ip + }, + { + 'action-param': 'mon_port', + 'action-value': str(int_collector_port) + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action) + + # Rule #5 + rules_int_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_EGRESS_REPORT, + 'match-fields': [ + { + 'match-field': 'bmd_type', + 'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR) + }, + { + 'match-field': 'mirror_type', + 'match-value': str(MIRROR_TYPE_INT_REPORT) + }, + { + 'match-field': 'int_report_type', + 'match-value': str(INT_REPORT_TYPE_QUEUE) + } + ], + 'action-name': 'FabricEgress.int_egress.do_local_report_encap', + 'action-params': [ + { + 'action-param': 'switch_id', + 'action-value': str(switch_id) + }, + { + 'action-param': 'src_ip', + 'action-value': src_ip + }, + { + 'action-param': 'mon_ip', + 'action-value': int_collector_ip + }, + { + 'action-param': 'mon_port', + 'action-value': str(int_collector_port) + } + ] + } + ) + ) + + rule_no = cache_rule(TABLE_INT_EGRESS_REPORT, action) + + # Rule #6 + rules_int_egress.append( + json_config_rule( + action, + '/tables/table/'+TABLE_INT_EGRESS_REPORT+'['+str(rule_no)+']', + { + 'table-name': TABLE_INT_EGRESS_REPORT, + 'match-fields': [ + { + 'match-field': 'bmd_type', + 'match-value': str(BRIDGED_MD_TYPE_EGRESS_MIRROR) + }, + { + 'match-field': 'mirror_type', + 'match-value': str(MIRROR_TYPE_INT_REPORT) + }, + { + 'match-field': 'int_report_type', + 'match-value': str(INT_REPORT_TYPE_QUEUE | INT_REPORT_TYPE_FLOW) + } + ], + 'action-name': 'FabricEgress.int_egress.do_local_report_encap', + 'action-params': [ + { + 'action-param': 'switch_id', + 'action-value': str(switch_id) + }, + { + 'action-param': 'src_ip', + 'action-value': src_ip + }, + { + 'action-param': 'mon_ip', + 'action-value': int_collector_ip + }, + { + 'action-param': 'mon_port', + 'action-value': str(int_collector_port) + } + ] + } + ) + ) + + return rules_int_egress diff --git a/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_service_handler.py b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..dd19c4f895665a80a57d3235db7074ed1f297af6 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_int/p4_fabric_tna_int_service_handler.py @@ -0,0 +1,439 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Service handler for P4-based In-band Network Telemetry (INT) v0.5. +The spec. 
is based on P4.org Application WG INT Dataplane +Specification v0.5 (2017-12): + +https://p4.org/p4-spec/docs/INT_v0_5.pdf +""" + +import logging +from typing import Any, List, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigActionEnum, DeviceId, Service, Device +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type, chk_address_mac, chk_address_ipv4,\ + chk_transport_port, chk_vlan_id +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +from .p4_fabric_tna_int_config import * + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_fabric_tna_int'}) + +class P4FabricINTServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings # type: ignore + ) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. + + """ + self.__service_label = "P4 In-band Network Telemetry (INT) connectivity service" + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(self.__service.service_config, **settings) + + self._init_settings() + self._parse_settings() + self._print_settings() + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
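+
+            Example:
+                An illustrative call (the UUIDs below are placeholders, not
+                values used or required by this handler):
+
+                    endpoints = [('device-uuid-1', 'endpoint-uuid-1'),
+                                 ('device-uuid-2', 'endpoint-uuid-2')]
+                    results = handler.SetEndpoint(endpoints)
+                    # One boolean per distinct device configured in this call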
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Provision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, _ = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + + # Skip already visited devices + if device.name in visited: + continue + LOGGER.info("Device {} - Setting up In-band Network Telemetry (INT) configuration".format( + device.name)) + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules(device_obj=device, action=ConfigActionEnum.CONFIGACTION_SET) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert INT rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device again + visited.add(device.name) + + LOGGER.info("Installed {}/{} INT rules on device {}".format( + applied_rules, actual_rules, device.name)) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
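+
+            Note:
+                Removal is symmetric to SetEndpoint(): the same per-device INT
+                rule set is rebuilt and pushed with
+                ConfigActionEnum.CONFIGACTION_DELETE.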
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Deprovision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, _ = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + + # Skip already visited devices + if device.name in visited: + continue + LOGGER.info("Device {} - Removing In-band Network Telemetry (INT) configuration".format( + device.name)) + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules(device_obj=device, action=ConfigActionEnum.CONFIGACTION_DELETE) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, device_obj=device, json_config_rules=rules) + except Exception as ex: + LOGGER.error("Failed to delete INT rules from device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device again + visited.add(device.name) + + LOGGER.info("Deleted {}/{} INT rules from device {}".format( + applied_rules, actual_rules, device.name)) + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. 
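+
+            Note: this handler does not implement per-resource configuration;
+            the request is logged and acknowledged without modifying the device.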
+ Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key changes requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value containing + possible additionally required values to locate the value + to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + def _init_settings(self): + self.__switch_info = {} + self.__int_collector_info = {} + self.__int_collector_mac = "" + self.__int_collector_ip = "" + self.__int_collector_port = -1 + self.__int_vlan_id = -1 + + try: + self.__settings = self.__settings_handler.get('/settings') + LOGGER.info("{} with settings: {}".format(self.__service_label, self.__settings)) + except Exception as ex: + LOGGER.error("Failed to retrieve service settings: {}".format(ex)) + raise Exception(ex) + + def _parse_settings(self): + try: + self.__switch_info = self.__settings.value[SWITCH_INFO] + except Exception as ex: + LOGGER.error("Failed to parse service settings: {}".format(ex)) + raise Exception(ex) + assert isinstance(self.__switch_info, dict), "Switch info object must be a map with switch names as keys" + + for switch_name, switch_info in self.__switch_info.items(): + assert switch_name, "Invalid P4 switch name" + assert isinstance(switch_info, dict), "Switch {} info must be a map with arch, dpid, mac, ip, and int_port items)" + assert switch_info[ARCH] in SUPPORTED_TARGET_ARCH_LIST, \ + "Switch {} - Supported P4 architectures are: {}".format(switch_name, ','.join(SUPPORTED_TARGET_ARCH_LIST)) + assert switch_info[DPID] > 0, "Switch {} - P4 switch dataplane ID must be a positive integer".format(switch_name, switch_info[DPID]) + assert chk_address_mac(switch_info[MAC]), "Switch {} - Invalid source Ethernet address".format(switch_name) + assert chk_address_ipv4(switch_info[IP]), "Switch {} - Invalid source IP address".format(switch_name) + assert isinstance(switch_info[PORT_INT], dict), "Switch {} - INT port object must be a map with port_id and port_type items".format(switch_name) + assert switch_info[PORT_INT][PORT_ID] >= 0, 
"Switch {} - Invalid P4 switch port ID".format(switch_name) + assert switch_info[PORT_INT][PORT_TYPE] in PORT_TYPES_STR_VALID, "Switch {} - Valid P4 switch port types are: {}".format( + switch_name, ','.join(PORT_TYPES_STR_VALID)) + if arch_tna(switch_info[ARCH]): + switch_info[RECIRCULATION_PORT_LIST] = RECIRCULATION_PORTS_TNA + switch_info[INT_REPORT_MIRROR_ID_LIST] = INT_REPORT_MIRROR_ID_LIST_TNA + else: + switch_info[RECIRCULATION_PORT_LIST] = RECIRCULATION_PORTS_V1MODEL + switch_info[INT_REPORT_MIRROR_ID_LIST] = INT_REPORT_MIRROR_ID_LIST_V1MODEL + assert isinstance(switch_info[RECIRCULATION_PORT_LIST], list), "Switch {} - Recirculation ports must be described as a list".format(switch_name) + + self.__int_collector_info = self.__settings.value[INT_COLLECTOR_INFO] + assert isinstance(self.__int_collector_info, dict), "INT collector info object must be a map with mac, ip, port, and vlan_id keys)" + + self.__int_collector_mac = self.__int_collector_info[MAC] + assert chk_address_mac(self.__int_collector_mac), "Invalid P4 INT collector MAC address" + + self.__int_collector_ip = self.__int_collector_info[IP] + assert chk_address_ipv4(self.__int_collector_ip), "Invalid P4 INT collector IPv4 address" + + self.__int_collector_port = self.__int_collector_info[PORT] + assert chk_transport_port(self.__int_collector_port), "Invalid P4 INT collector transport port" + + self.__int_vlan_id = self.__int_collector_info[VLAN_ID] + assert chk_vlan_id(self.__int_vlan_id), "Invalid VLAN ID" + + def _print_settings(self): + LOGGER.info("-------------------- {} settings --------------------".format(self.__service.name)) + LOGGER.info("--- Topology info") + for switch_name, switch_info in self.__switch_info.items(): + LOGGER.info("\t Device {}".format(switch_name)) + LOGGER.info("\t\t| Target P4 architecture: {}".format(switch_info[ARCH])) + LOGGER.info("\t\t| Data plane ID: {}".format(switch_info[DPID])) + LOGGER.info("\t\t| Source MAC address: {}".format(switch_info[MAC])) + LOGGER.info("\t\t| Source IP address: {}".format(switch_info[IP])) + LOGGER.info("\t\t| INT port ID: {}".format(switch_info[PORT_INT][PORT_ID])) + LOGGER.info("\t\t| INT port type: {}".format(switch_info[PORT_INT][PORT_TYPE])) + LOGGER.info("\t\t| Recirculation port list: {}".format(switch_info[RECIRCULATION_PORT_LIST])) + LOGGER.info("\t\t| Report mirror ID list: {}".format(switch_info[INT_REPORT_MIRROR_ID_LIST])) + LOGGER.info("--- INT collector MAC: {}".format(self.__int_collector_mac)) + LOGGER.info("--- INT collector IP: {}".format(self.__int_collector_ip)) + LOGGER.info("--- INT collector port: {}".format(self.__int_collector_port)) + LOGGER.info("--- INT VLAN ID: {}".format(self.__int_vlan_id)) + LOGGER.info("-----------------------------------------------------------------") + + def _create_rules(self, device_obj : Device, action : ConfigActionEnum): # type: ignore + dev_name = device_obj.name + rules = [] + + try: + ### INT reporting rules + rules += rules_set_up_int_watchlist(action=action) + rules += rules_set_up_int_recirculation_ports( + recirculation_port_list=self.__switch_info[dev_name][RECIRCULATION_PORT_LIST], + port_type=PORT_TYPE_INT, + fwd_type=FORWARDING_TYPE_UNICAST_IPV4, + vlan_id=self.__int_vlan_id, + action=action + ) + rules += rules_set_up_int_report_collector( + int_collector_ip=self.__int_collector_ip, + action=action + ) + rules += rules_set_up_int_report_flow( + switch_id=self.__switch_info[dev_name][DPID], + src_ip=self.__switch_info[dev_name][IP], + int_collector_ip=self.__int_collector_ip, + 
int_collector_port=self.__int_collector_port, + action=action + ) + rules += rules_set_up_report_mirror_flow( + recirculation_port_list=self.__switch_info[dev_name][RECIRCULATION_PORT_LIST], + report_mirror_id_list=self.__switch_info[dev_name][INT_REPORT_MIRROR_ID_LIST], + action=action + ) + ### INT port setup rules + rules += rules_set_up_port( + port=self.__switch_info[dev_name][PORT_INT][PORT_ID], + port_type=PORT_TYPE_HOST, + fwd_type=FORWARDING_TYPE_BRIDGING, + vlan_id=self.__int_vlan_id, + action=action + ) + ### INT port forwarding rules + rules += rules_set_up_fwd_bridging( + vlan_id=self.__int_vlan_id, + eth_dst=self.__int_collector_mac, + egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID], + action=action + ) + rules += rules_set_up_next_output_simple( + egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID], + action=action + ) + ### INT packet routing rules + rules += rules_set_up_next_routing_simple( + egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID], + eth_src=self.__switch_info[dev_name][MAC], + eth_dst=self.__int_collector_mac, + action=action + ) + rules += rules_set_up_routing( + ipv4_dst=self.__int_collector_ip, + ipv4_prefix_len=32, + egress_port=self.__switch_info[dev_name][PORT_INT][PORT_ID], + action=action + ) + except Exception as ex: + LOGGER.error("Error while creating rules") + raise Exception(ex) + + return rules diff --git a/src/service/service/service_handlers/p4_fabric_tna_l2_simple/__init__.py b/src/service/service/service_handlers/p4_fabric_tna_l2_simple/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l2_simple/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/service/service/service_handlers/p4_fabric_tna_l2_simple/p4_fabric_tna_l2_simple_config.py b/src/service/service/service_handlers/p4_fabric_tna_l2_simple/p4_fabric_tna_l2_simple_config.py new file mode 100644 index 0000000000000000000000000000000000000000..5e2aed066a82dd8d37713aec88b7e560fa363b6a --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l2_simple/p4_fabric_tna_l2_simple_config.py @@ -0,0 +1,69 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +Common objects and methods for L2 forwarding based on the SD-Fabric dataplane model. +This dataplane covers both software based and hardware-based Stratum-enabled P4 switches, +such as the BMv2 software switch and Intel's Tofino/Tofino-2 switches. + +SD-Fabric repo: https://github.com/stratum/fabric-tna +SD-Fabric docs: https://docs.sd-fabric.org/master/index.html +""" + +import logging +from common.proto.context_pb2 import ConfigActionEnum + +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * + +LOGGER = logging.getLogger(__name__) + +# L2 simple service handler settings +FORWARDING_LIST = "fwd_list" +HOST_MAC = "host_mac" + +def rules_set_up_port_host( + port : int, + vlan_id : int, + action : ConfigActionEnum, # type: ignore + fwd_type=FORWARDING_TYPE_BRIDGING, + eth_type=ETHER_TYPE_IPV4): + # This is a host facing port + port_type = PORT_TYPE_HOST + + return rules_set_up_port( + port=port, + port_type=port_type, + fwd_type=fwd_type, + vlan_id=vlan_id, + action=action, + eth_type=eth_type + ) + +def rules_set_up_port_switch( + port : int, + vlan_id : int, + action : ConfigActionEnum, # type: ignore + fwd_type=FORWARDING_TYPE_BRIDGING, + eth_type=ETHER_TYPE_IPV4): + # This is a switch facing port + port_type = PORT_TYPE_SWITCH + + return rules_set_up_port( + port=port, + port_type=port_type, + fwd_type=fwd_type, + vlan_id=vlan_id, + action=action, + eth_type=eth_type + ) diff --git a/src/service/service/service_handlers/p4_fabric_tna_l2_simple/p4_fabric_tna_l2_simple_service_handler.py b/src/service/service/service_handlers/p4_fabric_tna_l2_simple/p4_fabric_tna_l2_simple_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..8d4aaf08119ea591a1829eee7122f45e0d9f904f --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l2_simple/p4_fabric_tna_l2_simple_service_handler.py @@ -0,0 +1,464 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Service handler for P4-based L2 forwarding using the SD-Fabric P4 dataplane +for BMv2 and Intel Tofino switches. 
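+
+The handler is driven by the per-service '/settings' config rule. A sketch of
+the structure parsed by _parse_settings() (key names are the constants imported
+from p4_fabric_tna_commons and p4_fabric_tna_l2_simple_config; the literal
+values are placeholders):
+
+    {
+        SWITCH_INFO: {
+            "switch1": {
+                ARCH: "<one of SUPPORTED_TARGET_ARCH_LIST>",
+                DPID: 1,
+                PORT_LIST: [
+                    {PORT_ID: 1, PORT_TYPE: PORT_TYPE_HOST, VLAN_ID: 100}
+                ],
+                FORWARDING_LIST: [
+                    {PORT_ID: 1, HOST_MAC: "aa:bb:cc:dd:ee:01"}
+                ]
+            }
+        }
+    }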
+""" + +import logging +from typing import Any, List, Dict, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigActionEnum, DeviceId, Service, Device +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type, chk_address_mac, chk_vlan_id +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +from .p4_fabric_tna_l2_simple_config import * + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_fabric_tna_l2_simple'}) + +class P4FabricL2SimpleServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings # type: ignore + ) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. + + """ + self.__service_label = "P4 L2 simple connectivity service" + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(self.__service.service_config, **settings) + + self._init_settings() + self._parse_settings() + self._print_settings() + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
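+
+            Note:
+                For each endpoint, the handler resolves the underlying switch
+                port and installs (i) port setup rules (host- or switch-facing),
+                (ii) one bridging rule per host MAC in that port's fwd_list, and
+                (iii) a next-output rule towards the port (see _create_rules()).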
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Provision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_SET) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert L2 rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Installed {}/{} L2 rules on device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. 
+ """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Deprovision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_DELETE) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert L2 rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Deleted {}/{} L2 rules from device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[DeleteConstraint] Method not implemented. 
Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def SetConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value + containing the new value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key changes requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' + LOGGER.warning(msg.format(str(resources))) + return [True for _ in range(len(resources))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConfig(self, resources: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete configuration for a list of service resources. + Parameters: + resources: List[Tuple[str, Any]] + List of tuples, each containing a resource_key pointing to + the resource to be modified, and a resource_value containing + possible additionally required values to locate the value + to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for resource key deletions requested. + Return values must be in the same order as the requested + resource keys. If a resource is properly deleted, True must + be returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('resources', resources, list) + if len(resources) == 0: return [] + + msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.' 
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    def _init_settings(self):
+        self.__switch_info = {}
+        self.__port_map = {}
+
+        try:
+            self.__settings = self.__settings_handler.get('/settings')
+            LOGGER.info("{} with settings: {}".format(self.__service_label, self.__settings))
+        except Exception as ex:
+            LOGGER.error("Failed to retrieve service settings: {}".format(ex))
+            raise Exception(ex)
+
+    def _parse_settings(self):
+        try:
+            self.__switch_info = self.__settings.value[SWITCH_INFO]
+        except Exception as ex:
+            LOGGER.error("Failed to parse service settings: {}".format(ex))
+            raise Exception(ex)
+        assert isinstance(self.__switch_info, dict), "Switch info object must be a map with switch names as keys"
+
+        for switch_name, switch_info in self.__switch_info.items():
+            assert switch_name, "Invalid P4 switch name"
+            assert isinstance(switch_info, dict), \
+                "Switch {} info must be a map with arch, dpid, port_list, and fwd_list items".format(switch_name)
+            assert switch_info[ARCH] in SUPPORTED_TARGET_ARCH_LIST, \
+                "Switch {} - Supported P4 architectures are: {}".format(switch_name, ','.join(SUPPORTED_TARGET_ARCH_LIST))
+            switch_dpid = switch_info[DPID]
+            assert switch_dpid > 0, "Switch {} - P4 switch dataplane ID must be a positive integer".format(switch_name)
+
+            # Port list
+            port_list = switch_info[PORT_LIST]
+            assert isinstance(port_list, list), \
+                "Switch {} port list must be a list with port_id, port_type, and vlan_id items".format(switch_name)
+            for port in port_list:
+                port_id = port[PORT_ID]
+                assert port_id >= 0, "Switch {} - Invalid P4 switch port ID".format(switch_name)
+                port_type = port[PORT_TYPE]
+                assert port_type in PORT_TYPES_STR_VALID, "Switch {} - Valid P4 switch port types are: {}".format(
+                    switch_name, ','.join(PORT_TYPES_STR_VALID))
+                vlan_id = port[VLAN_ID]
+                assert chk_vlan_id(vlan_id), "Switch {} - Invalid VLAN ID for port {}".format(switch_name, port_id)
+
+                if switch_name not in self.__port_map:
+                    self.__port_map[switch_name] = {}
+                port_key = PORT_PREFIX + str(port_id)
+                if port_key not in self.__port_map[switch_name]:
+                    self.__port_map[switch_name][port_key] = {}
+                self.__port_map[switch_name][port_key][PORT_ID] = port_id
+                self.__port_map[switch_name][port_key][PORT_TYPE] = port_type
+                self.__port_map[switch_name][port_key][VLAN_ID] = vlan_id
+                self.__port_map[switch_name][port_key][FORWARDING_LIST] = []
+
+            # Forwarding list
+            fwd_list = switch_info[FORWARDING_LIST]
+            assert isinstance(fwd_list, list), "Switch {} forwarding list must be a list".format(switch_name)
+            for fwd_entry in fwd_list:
+                port_id = fwd_entry[PORT_ID]
+                assert port_id >= 0, "Invalid port ID: {}".format(port_id)
+                host_mac = fwd_entry[HOST_MAC]
+                assert chk_address_mac(host_mac), "Invalid host MAC address {}".format(host_mac)
+
+                # Retrieve entry from the port map
+                switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id)
+
+                host_facing_port = self._is_host_facing_port(switch_name, port_id)
+                LOGGER.info("Switch {} - Port {}: Is host facing: {}".format(
+                    switch_name, port_id, "True" if host_facing_port else "False"))
+                switch_port_entry[FORWARDING_LIST].append(host_mac)
+
+    def _print_settings(self):
+        LOGGER.info("--------------- {} settings ---------------".format(self.__service.name))
+        LOGGER.info("--- Topology info")
+        for switch_name, switch_info in self.__switch_info.items():
+            LOGGER.info("\t Device {}".format(switch_name))
+            LOGGER.info("\t\t| Target P4 architecture: {}".format(switch_info[ARCH]))
+            LOGGER.info("\t\t| Data plane ID: {}".format(switch_info[DPID]))
+
LOGGER.info("\t\t| Port map: {}".format(self.__port_map[switch_name])) + LOGGER.info("-------------------------------------------------------") + + def _get_switch_port_in_port_map(self, switch_name : str, port_id : int) -> Dict: + assert switch_name, "A valid switch name must be used as a key to the port map" + assert port_id > 0, "A valid switch port ID must be used as a key to a switch's port map" + switch_entry = self.__port_map[switch_name] + assert switch_entry, "Switch {} does not exist in the port map".format(switch_name) + port_key = PORT_PREFIX + str(port_id) + assert switch_entry[port_key], "Port with ID {} does not exist in the switch map".format(port_id) + + return switch_entry[port_key] + + def _get_port_type_of_switch_port(self, switch_name : str, port_id : int) -> str: + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + return switch_port_entry[PORT_TYPE] + + def _get_vlan_id_of_switch_port(self, switch_name : str, port_id : int) -> int: + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + return switch_port_entry[VLAN_ID] + + def _get_fwd_list_of_switch_port(self, switch_name : str, port_id : int) -> List [Tuple]: + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + return switch_port_entry[FORWARDING_LIST] + + def _is_host_facing_port(self, switch_name : str, port_id : int) -> bool: + return self._get_port_type_of_switch_port(switch_name, port_id) == PORT_TYPE_HOST + + def _create_rules(self, device_obj : Device, port_id : int, action : ConfigActionEnum): # type: ignore + dev_name = device_obj.name + + host_facing_port = self._is_host_facing_port(dev_name, port_id) + LOGGER.info("\t | Service endpoint is host facing: {}".format("True" if host_facing_port else "False")) + + rules = [] + + try: + ### Port setup rules + if host_facing_port: + rules += rules_set_up_port_host( + port=port_id, + vlan_id=self._get_vlan_id_of_switch_port(switch_name=dev_name, port_id=port_id), + action=action + ) + else: + rules += rules_set_up_port_switch( + port=port_id, + vlan_id=self._get_vlan_id_of_switch_port(switch_name=dev_name, port_id=port_id), + action=action + ) + except Exception as ex: + LOGGER.error("Error while creating port setup rules") + raise Exception(ex) + + fwd_list = self._get_fwd_list_of_switch_port(switch_name=dev_name, port_id=port_id) + for mac in fwd_list: + LOGGER.info("Switch {} - Port {} - Creating rule for host MAC: {}".format(dev_name, port_id, mac)) + try: + ### Bridging rules + rules += rules_set_up_fwd_bridging( + vlan_id=self._get_vlan_id_of_switch_port(switch_name=dev_name, port_id=port_id), + eth_dst=mac, + egress_port=port_id, + action=action + ) + except Exception as ex: + LOGGER.error("Error while creating bridging rules") + raise Exception(ex) + + try: + ### Next output rule + rules += rules_set_up_next_output_simple( + egress_port=port_id, + action=action + ) + except Exception as ex: + LOGGER.error("Error while creating next output L2 rules") + raise Exception(ex) + + return rules diff --git a/src/service/service/service_handlers/p4_fabric_tna_l3/__init__.py b/src/service/service/service_handlers/p4_fabric_tna_l3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l3/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/service/service/service_handlers/p4_fabric_tna_l3/p4_fabric_tna_l3_service_handler.py b/src/service/service/service_handlers/p4_fabric_tna_l3/p4_fabric_tna_l3_service_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..4b013328ee30425a25d7bd6171d938818cb413dc --- /dev/null +++ b/src/service/service/service_handlers/p4_fabric_tna_l3/p4_fabric_tna_l3_service_handler.py @@ -0,0 +1,435 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Service handler for P4-based static L3 routing using the SD-Fabric P4 dataplane +for BMv2 and Intel Tofino switches. +""" + +import logging +from typing import Any, List, Dict, Optional, Tuple, Union +from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method +from common.proto.context_pb2 import ConfigActionEnum, DeviceId, Service, Device +from common.tools.object_factory.Device import json_device_id +from common.type_checkers.Checkers import chk_type, chk_address_mac, chk_address_ipv4, chk_prefix_len_ipv4 +from service.service.service_handler_api._ServiceHandler import _ServiceHandler +from service.service.service_handler_api.SettingsHandler import SettingsHandler +from service.service.service_handlers.p4_fabric_tna_commons.p4_fabric_tna_commons import * +from service.service.task_scheduler.TaskExecutor import TaskExecutor + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'p4_fabric_tna_l3'}) + +class P4FabricL3ServiceHandler(_ServiceHandler): + def __init__( # pylint: disable=super-init-not-called + self, service : Service, task_executor : TaskExecutor, **settings # type: ignore + ) -> None: + """ Initialize Driver. + Parameters: + service + The service instance (gRPC message) to be managed. + task_executor + An instance of Task Executor providing access to the + service handlers factory, the context and device clients, + and an internal cache of already-loaded gRPC entities. + **settings + Extra settings required by the service handler. 
+ + """ + self.__service_label = "P4 static L3 connectivity service" + self.__service = service + self.__task_executor = task_executor + self.__settings_handler = SettingsHandler(self.__service.service_config, **settings) + + self._init_settings() + self._parse_settings() + self._print_settings() + + @metered_subclass_method(METRICS_POOL) + def SetEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Create/Update service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid and, optionally, the topology_uuid + of the endpoint to be added. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint changes requested. + Return values must be in the same order as the requested + endpoints. If an endpoint is properly added, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Provision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_SET) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert L3 rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Installed {}/{} L3 rules on device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def DeleteEndpoint( + self, endpoints : List[Tuple[str, str, Optional[str]]], + connection_uuid : Optional[str] = None + ) -> List[Union[bool, Exception]]: + """ Delete service endpoints from a list. + Parameters: + endpoints: List[Tuple[str, str, Optional[str]]] + List of tuples, each containing a device_uuid, + endpoint_uuid, and the topology_uuid of the endpoint + to be removed. + connection_uuid : Optional[str] + If specified, is the UUID of the connection this endpoint is associated to. + Returns: + results: List[Union[bool, Exception]] + List of results for endpoint deletions requested. 
+ Return values must be in the same order as the requested + endpoints. If an endpoint is properly deleted, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('endpoints', endpoints, list) + if len(endpoints) == 0: return [] + + LOGGER.info("{} - Deprovision service configuration".format( + self.__service_label)) + + visited = set() + results = [] + for endpoint in endpoints: + device_uuid, endpoint_uuid = endpoint[0:2] + device = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device.name + + LOGGER.info("Device {}".format(device_name)) + LOGGER.info("\t | Service endpoint UUID: {}".format(endpoint_uuid)) + + port_id = find_port_id_in_endpoint_list(device.device_endpoints, endpoint_uuid) + LOGGER.info("\t | Service port ID: {}".format(port_id)) + + dev_port_key = device_name + "-" + PORT_PREFIX + str(port_id) + + # Skip already visited device ports + if dev_port_key in visited: + continue + + rules = [] + actual_rules = -1 + applied_rules, failed_rules = 0, -1 + + # Create and apply rules + try: + rules = self._create_rules( + device_obj=device, port_id=port_id, action=ConfigActionEnum.CONFIGACTION_DELETE) + actual_rules = len(rules) + applied_rules, failed_rules = apply_rules( + task_executor=self.__task_executor, + device_obj=device, + json_config_rules=rules + ) + except Exception as ex: + LOGGER.error("Failed to insert L3 rules on device {} due to {}".format(device.name, ex)) + finally: + rules.clear() + + # Ensure correct status + results.append(True) if (failed_rules == 0) and (applied_rules == actual_rules) \ + else results.append(False) + + # You should no longer visit this device port again + visited.add(dev_port_key) + + LOGGER.info("Deleted {}/{} L3 rules from device {} and port {}".format( + applied_rules, actual_rules, device_name, port_id)) + + return results + + @metered_subclass_method(METRICS_POOL) + def SetConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Create/Update service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type and the + new constraint_value to be set. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint changes requested. + Return values must be in the same order as the requested + constraints. If a constraint is properly set, True must be + returned; otherwise, the Exception that is raised during + the processing must be returned. + """ + chk_type('constraints', constraints, list) + if len(constraints) == 0: return [] + + msg = '[SetConstraint] Method not implemented. Constraints({:s}) are being ignored.' + LOGGER.warning(msg.format(str(constraints))) + return [True for _ in range(len(constraints))] + + @metered_subclass_method(METRICS_POOL) + def DeleteConstraint(self, constraints: List[Tuple[str, Any]]) \ + -> List[Union[bool, Exception]]: + """ Delete service constraints. + Parameters: + constraints: List[Tuple[str, Any]] + List of tuples, each containing a constraint_type pointing + to the constraint to be deleted, and a constraint_value + containing possible additionally required values to locate + the constraint to be removed. + Returns: + results: List[Union[bool, Exception]] + List of results for constraint deletions requested. + Return values must be in the same order as the requested + constraints. 
If a constraint is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('constraints', constraints, list)
+        if len(constraints) == 0: return []
+
+        msg = '[DeleteConstraint] Method not implemented. Constraints({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(constraints)))
+        return [True for _ in range(len(constraints))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def SetConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Create/Update configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value
+                    containing the new value to be set.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key changes requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly set, True must be
+                    returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[SetConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    @metered_subclass_method(METRICS_POOL)
+    def DeleteConfig(self, resources: List[Tuple[str, Any]]) \
+            -> List[Union[bool, Exception]]:
+        """ Delete configuration for a list of service resources.
+            Parameters:
+                resources: List[Tuple[str, Any]]
+                    List of tuples, each containing a resource_key pointing to
+                    the resource to be modified, and a resource_value containing
+                    possible additionally required values to locate the value
+                    to be removed.
+            Returns:
+                results: List[Union[bool, Exception]]
+                    List of results for resource key deletions requested.
+                    Return values must be in the same order as the requested
+                    resource keys. If a resource is properly deleted, True must
+                    be returned; otherwise, the Exception that is raised during
+                    the processing must be returned.
+        """
+        chk_type('resources', resources, list)
+        if len(resources) == 0: return []
+
+        msg = '[DeleteConfig] Method not implemented. Resources({:s}) are being ignored.'
+        LOGGER.warning(msg.format(str(resources)))
+        return [True for _ in range(len(resources))]
+
+    def _init_settings(self):
+        self.__switch_info = {}
+        self.__port_map = {}
+
+        try:
+            self.__settings = self.__settings_handler.get('/settings')
+            LOGGER.info("{} with settings: {}".format(self.__service_label, self.__settings))
+        except Exception as ex:
+            LOGGER.error("Failed to retrieve service settings: {}".format(ex))
+            raise Exception(ex)
+
+    def _parse_settings(self):
+        try:
+            self.__switch_info = self.__settings.value[SWITCH_INFO]
+        except Exception as ex:
+            LOGGER.error("Failed to parse service settings: {}".format(ex))
+            raise Exception(ex)
+        assert isinstance(self.__switch_info, dict), "Switch info object must be a map with switch names as keys"
+
+        for switch_name, switch_info in self.__switch_info.items():
+            assert switch_name, "Invalid P4 switch name"
+            assert isinstance(switch_info, dict), \
+                "Switch {} info must be a map with arch, dpid, port_list, and routing_list items".format(switch_name)
+            assert switch_info[ARCH] in SUPPORTED_TARGET_ARCH_LIST, \
+                "Switch {} - Supported P4 architectures are: {}".format(switch_name, ','.join(SUPPORTED_TARGET_ARCH_LIST))
+            switch_dpid = switch_info[DPID]
+            assert switch_dpid > 0, "Switch {} - P4 switch dataplane ID must be a positive integer".format(switch_name)
+
+            # Port list
+            port_list = switch_info[PORT_LIST]
+            assert isinstance(port_list, list), \
+                "Switch {} port list must be a list with port_id and port_type items".format(switch_name)
+            for port in port_list:
+                port_id = port[PORT_ID]
+                assert port_id >= 0, "Switch {} - Invalid P4 switch port ID".format(switch_name)
+                port_type = port[PORT_TYPE]
+                assert port_type in PORT_TYPES_STR_VALID, "Switch {} - Valid P4 switch port types are: {}".format(
+                    switch_name, ','.join(PORT_TYPES_STR_VALID))
+
+                if switch_name not in self.__port_map:
+                    self.__port_map[switch_name] = {}
+                port_key = PORT_PREFIX + str(port_id)
+                if port_key not in self.__port_map[switch_name]:
+                    self.__port_map[switch_name][port_key] = {}
+                self.__port_map[switch_name][port_key][PORT_ID] = port_id
+                self.__port_map[switch_name][port_key][PORT_TYPE] = port_type
+                self.__port_map[switch_name][port_key][ROUTING_LIST] = []
+
+            # Routing list
+            routing_list = switch_info[ROUTING_LIST]
+            assert isinstance(routing_list, list), "Switch {} routing list must be a list".format(switch_name)
+            for rt_entry in routing_list:
+                port_id = rt_entry[PORT_ID]
+                assert port_id >= 0, "Invalid port ID: {}".format(port_id)
+                ipv4_dst = rt_entry[IPV4_DST]
+                assert chk_address_ipv4(ipv4_dst), "Invalid destination IPv4 address {}".format(ipv4_dst)
+                ipv4_prefix_len = rt_entry[IPV4_PREFIX_LEN]
+                assert chk_prefix_len_ipv4(ipv4_prefix_len), "Invalid IPv4 address prefix length {}".format(ipv4_prefix_len)
+                mac_src = rt_entry[MAC_SRC]
+                assert chk_address_mac(mac_src), "Invalid source MAC address {}".format(mac_src)
+                mac_dst = rt_entry[MAC_DST]
+                assert chk_address_mac(mac_dst), "Invalid destination MAC address {}".format(mac_dst)
+
+                # Retrieve entry from the port map
+                switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id)
+
+                # Add routing entry
+                switch_port_entry[ROUTING_LIST].append(
+                    {
+                        PORT_ID: port_id,
+                        IPV4_DST: ipv4_dst,
+                        IPV4_PREFIX_LEN: ipv4_prefix_len,
+                        MAC_SRC: mac_src,
+                        MAC_DST: mac_dst
+                    }
+                )
+
+    def _print_settings(self):
+        LOGGER.info("--------------- {} settings ---------------".format(self.__service.name))
+        LOGGER.info("--- Topology info")
+        for switch_name, switch_info in self.__switch_info.items():
+            LOGGER.info("\t Device {}".format(switch_name))
+
LOGGER.info("\t\t| Target P4 architecture: {}".format(switch_info[ARCH])) + LOGGER.info("\t\t| Data plane ID: {}".format(switch_info[DPID])) + LOGGER.info("\t\t| Port map: {}".format(self.__port_map[switch_name])) + LOGGER.info("-------------------------------------------------------") + + def _get_switch_port_in_port_map(self, switch_name : str, port_id : int) -> Dict: + assert switch_name, "A valid switch name must be used as a key to the port map" + assert port_id > 0, "A valid switch port ID must be used as a key to a switch's port map" + switch_entry = self.__port_map[switch_name] + assert switch_entry, "Switch {} does not exist in the port map".format(switch_name) + port_key = PORT_PREFIX + str(port_id) + assert switch_entry[port_key], "Port with ID {} does not exist in the switch map".format(port_id) + + return switch_entry[port_key] + + def _get_routing_list_of_switch_port(self, switch_name : str, port_id : int) -> List [Tuple]: + switch_port_entry = self._get_switch_port_in_port_map(switch_name, port_id) + return switch_port_entry[ROUTING_LIST] + + def _create_rules(self, device_obj : Device, port_id : int, action : ConfigActionEnum): # type: ignore + dev_name = device_obj.name + + rules = [] + + ### Static routing rules + routing_list = self._get_routing_list_of_switch_port(switch_name=dev_name, port_id=port_id) + for rt_entry in routing_list: + try: + rules += rules_set_up_next_routing_simple( + egress_port=port_id, + eth_src=rt_entry[MAC_SRC], + eth_dst=rt_entry[MAC_DST], + action=action + ) + rules += rules_set_up_routing( + ipv4_dst=rt_entry[IPV4_DST], + ipv4_prefix_len=rt_entry[IPV4_PREFIX_LEN], + egress_port=port_id, + action=action + ) + except Exception as ex: + LOGGER.error("Error while creating static L3 routing rules") + raise Exception(ex) + + return rules diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index 51fc42a5a5ea06d5abaa49946a154d0c38f6de8b..8f797882f86f4d53c61da22964909ab1a808ba53 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -203,7 +203,7 @@ class TaskExecutor: self, service_id : ServiceId, config_key : str, config_value : str ): service_configRule = ServiceConfigRule() - service_configRule.service_id.CopyFrom( service_id) + service_configRule.service_id.CopyFrom(service_id) service_configRule.configrule_custom.resource_key = config_key service_configRule.configrule_custom.resource_value = config_value try: diff --git a/src/tests/p4-fabric-tna/README.md b/src/tests/p4-fabric-tna/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e8ae8fde918b6dd65fc1eb9a078753ea442f22a6 --- /dev/null +++ b/src/tests/p4-fabric-tna/README.md @@ -0,0 +1,206 @@ +# Tests for P4 routing, ACL, and In-Band Network Telemetry functions + +This directory contains the necessary scripts and configurations to run tests atop a Stratum-based P4 whitebox that performs a set of network functions, including forwarding (L2), routing (L3), L3/L4 access control list (ACL), and In-Band Network Telemetry (INT). +The P4 data plane is based on ONF's SD-Fabric implementation, titled [fabric-tna](https://github.com/stratum/fabric-tna) + +## Prerequisites + +You need Python3, which should already be installed while preparing for a TFS build. +Additionally, `pytest` is also mandatory as it is used by our tests below. +Aliasing python with python3 will also help bridging issues between older and newer python versions. 
+
+```shell
+alias python='python3'
+pip3 install pytest
+pip3 install grpclib protobuf
+pip3 install grpcio-tools
+```
+
+The versions used for this test are:
+
+- `protobuf` 3.20.3
+- `grpclib` 0.4.4
+- `grpcio` 1.47.5
+- `grpcio-tools` 1.47.5
+
+After installing `grpclib`, the `protoc-gen-grpclib_python` binary is placed in `/home/$USER/.local/bin/`.
+First, copy it to `/usr/local/bin/`:
+
+```shell
+sudo cp /home/$USER/.local/bin/protoc-gen-grpclib_python /usr/local/bin/
+```
+
+Then, add this path to `PYTHONPATH`:
+
+```shell
+export PYTHONPATH="${PYTHONPATH}:/usr/local/bin/protoc-gen-grpclib_python"
+```
+
+You need to build and deploy a software-based Stratum switch before you can use TFS to control it.
+To do so, follow the instructions in the `./topology` folder.
+
+## Steps to set up and run a TFS program atop Stratum
+
+To conduct this test, follow the steps below.
+
+### Deploy TFS
+
+```shell
+cd ~/tfs-ctrl/
+source my_deploy.sh && source tfs_runtime_env_vars.sh
+./deploy/all.sh
+```
+
+### Path setup
+
+Ensure that the `PATH` variable contains the parent project directory, e.g., "/home/$USER/tfs-ctrl".
+
+Ensure that the `PYTHONPATH` variable contains the source code directory of TFS, e.g., "/home/$USER/tfs-ctrl/src".
+
+## Topology setup
+
+The `./topology/` directory contains scripts that allow you to build Stratum on a target machine (e.g., a VM) and then deploy a P4 switch atop this machine.
+This test assumes a Stratum P4 switch with two network interfaces used as a data plane (routing traffic from one to the other), as well as another network interface used to send telemetry information to an external telemetry collector.
+
+## P4 artifacts
+
+The `./p4src/` directory contains the compiled P4 artifacts of the pipeline that will be pushed to the P4 switch, along with the P4Runtime definitions.
+The `./setup.sh` script copies the artifacts from this directory. If you need to change the P4 program, make sure to put the compiled artifacts there.
+
+## Tests
+
+A set of tests is implemented, each focusing on different aspects of TFS.
+For each of these tests, an auxiliary bash script allows you to run it with less typing.
+
+| Bash Runner | Purpose |
+| ---------------------------------------------- | ---------------------------------------------------------------------------- |
+| setup.sh | Copy P4 artifacts into the SBI pod |
+| run_test_01_bootstrap.sh | Connect TFS to the P4 switch |
+| run_test_02a_sbi_provision_int_l2_l3_acl.sh | Install L2, L3, INT, and ACL rules on the P4 switch via the SBI service |
+| run_test_02b_sbi_deprovision_int_l2_l3_acl.sh | Uninstall L2, L3, INT, and ACL rules from the P4 switch via the SBI service |
+| run_test_03a_service_provision_l2.sh | Install L2 forwarding rules via a dedicated P4 L2 service handler |
+| run_test_03b_service_deprovision_l2.sh | Uninstall L2 forwarding rules via a dedicated P4 L2 service handler |
+| run_test_04a_service_provision_l3.sh | Install L3 routing rules via a dedicated P4 L3 service handler |
+| run_test_04b_service_deprovision_l3.sh | Uninstall L3 routing rules via a dedicated P4 L3 service handler |
+| run_test_05a_service_provision_acl.sh | Install ACL rules via a dedicated P4 ACL service handler |
+| run_test_05b_service_deprovision_acl.sh | Uninstall ACL rules via a dedicated P4 ACL service handler |
+| run_test_06a_service_provision_int.sh | Install INT rules via a dedicated P4 INT service handler |
+| run_test_06b_service_deprovision_int.sh | Uninstall INT rules via a dedicated P4 INT service handler |
+| run_test_07_cleanup.sh | Clean up the context and topology and disconnect TFS from the P4 switch |
+| run_test_08_purge.sh | Purge services, links, devices, topology, and context from TFS |
+
+Each of the tests above is described in detail below.
+
+### Step 0: Copy the necessary P4 artifacts into the TFS SBI service pod
+
+The setup script copies the necessary artifacts to the SBI service pod.
+It should be run just once, after a fresh install of TFS.
+If you run `deploy/all.sh` again, you need to repeat this step.
+
+```shell
+cd ~/tfs-ctrl/
+source my_deploy.sh && source tfs_runtime_env_vars.sh
+bash src/tests/p4-fabric-tna/setup.sh
+```
+
+### Step 1: Bootstrap topology
+
+The bootstrap script registers the context, topology, links, and devices with TFS.
+
+```shell
+cd ~/tfs-ctrl/
+bash src/tests/p4-fabric-tna/run_test_01_bootstrap.sh
+```
+
+### Step 2: Manage L2, L3, ACL, and INT via the SBI API (rules)
+
+Implement forwarding, routing, ACL, and INT network functions by installing P4 rules on the Stratum switch via the TFS SBI API.
+In this test, the rules are installed in batches, as the switch cannot digest all of them at once.
+
+```shell
+cd ~/tfs-ctrl/
+bash src/tests/p4-fabric-tna/run_test_02a_sbi_provision_int_l2_l3_acl.sh
+```
+
+Deprovision the forwarding, routing, ACL, and INT network functions by removing the previously installed P4 rules (via the TFS SBI API) from the Stratum switch.
+
+```shell
+cd ~/tfs-ctrl/
+bash src/tests/p4-fabric-tna/run_test_02b_sbi_deprovision_int_l2_l3_acl.sh
+```
+
+### Step 3: Manage L2, L3, ACL, and INT via the Service API
+
+To avoid interacting with the switch using low-level P4 rules (via the SBI), we created modular network services, which allow users to easily provision L2, L3, ACL, and INT network functions.
+These services require users to define the service endpoints as well as some high-level service configuration, while leaving the rest of the complexity to tailored service handlers that interact with the SBI on behalf of the user.
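+
+As a concrete reference for this "high-level service configuration", the minimal sketch below renders the `/settings` payload consumed by the P4 L2 service handler as a plain Python dictionary. It mirrors the `service-create-l2-simple.json` descriptor shipped with these tests; the switch name, ports, VLAN, and MAC addresses are example values only, not a fixed schema.
+
+```python
+# Illustrative sketch only: the "/settings" resource_value parsed by the L2 handler,
+# expressed as a Python dict (mirrors descriptors/service-create-l2-simple.json).
+import json
+
+l2_settings = {
+    "switch_info": {
+        "sw1": {                                   # example switch name
+            "arch": "v1model",                     # must be a supported P4 architecture
+            "dpid": 1,                             # positive dataplane ID
+            "port_list": [                         # per-port type and VLAN
+                {"port_id": 1, "port_type": "host", "vlan_id": 4094},
+                {"port_id": 2, "port_type": "host", "vlan_id": 4094},
+            ],
+            "fwd_list": [                          # host MAC reachable via each port
+                {"port_id": 1, "host_mac": "00:00:00:00:00:01"},
+                {"port_id": 2, "host_mac": "00:00:00:00:00:02"},
+            ],
+        }
+    }
+}
+
+# The settings travel inside a custom config rule of the service request:
+config_rule = {
+    "action": "CONFIGACTION_SET",
+    "custom": {"resource_key": "/settings", "resource_value": l2_settings},
+}
+print(json.dumps(config_rule, indent=2))
+```
+
+The L3, ACL, and INT services follow the same pattern with their own keys (e.g., `routing_list`, `acl`, `int_collector_info`), as shown in the corresponding descriptors under `descriptors/`.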
+ +#### Provision L2 network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_03a_service_provision_l2.sh +``` + +#### Deprovision L2 network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_03b_service_deprovision_l2.sh +``` + +#### Provision L3 network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh +``` + +#### Deprovision L3 network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh +``` + +#### Provision ACL network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_05a_service_provision_acl.sh +``` + +#### Deprovision ACL network service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_05b_service_deprovision_acl.sh +``` + +#### Provision INT service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh +``` + +#### Deprovision INT service via the Service API + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh +``` + +### Step 4: Deprovision topology + +Delete all the objects (context, topology, links, devices) from TFS: + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_07_cleanup.sh +``` + +Alternatively, a purge test is implemented; this test removes services, links, devices, topology, and context in this order. + +```shell +cd ~/tfs-ctrl/ +bash src/tests/p4-fabric-tna/run_test_08_purge.sh +``` diff --git a/src/tests/p4-fabric-tna/__init__.py b/src/tests/p4-fabric-tna/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/tests/p4-fabric-tna/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-insert-acl.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-acl.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-insert-acl.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-acl.json diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-insert-int-b1.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-int-b1.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-insert-int-b1.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-int-b1.json diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-insert-int-b2.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-int-b2.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-insert-int-b2.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-int-b2.json diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-insert-int-b3.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-int-b3.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-insert-int-b3.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-int-b3.json diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-insert-routing-corp.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-routing-corp.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-insert-routing-corp.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-routing-corp.json diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-insert-routing-edge.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-routing-edge.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-insert-routing-edge.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-insert-routing-edge.json diff --git a/src/tests/p4-int-routing-acl/descriptors/rules-remove.json b/src/tests/p4-fabric-tna/descriptors/sbi-rules-remove.json similarity index 100% rename from src/tests/p4-int-routing-acl/descriptors/rules-remove.json rename to src/tests/p4-fabric-tna/descriptors/sbi-rules-remove.json diff --git a/src/tests/p4-fabric-tna/descriptors/service-create-acl.json b/src/tests/p4-fabric-tna/descriptors/service-create-acl.json new file mode 100644 index 0000000000000000000000000000000000000000..2ec9c6674e5f571768894415d106fc9178be3ece --- /dev/null +++ b/src/tests/p4-fabric-tna/descriptors/service-create-acl.json @@ -0,0 +1,48 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "p4-service-acl"} + }, + "name": "p4-service-acl", + "service_type": "SERVICETYPE_ACL", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "1"} + }, + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "2"} + } + ], + "service_config": { + "config_rules": [ + { + "action": "CONFIGACTION_SET", + "custom": { + "resource_key": "/settings", + "resource_value": { + "switch_info": { + "sw1": { + "arch": "v1model", + "dpid": 1, + "acl": [ + { + "port_id": 1, + "trn_port_dst": 8080, + "action": "drop" + } + ] + } + } + } + } + } + ] + }, + "service_constraints": [] + } + ] +} diff --git a/src/tests/p4-fabric-tna/descriptors/service-create-int.json 
b/src/tests/p4-fabric-tna/descriptors/service-create-int.json new file mode 100644 index 0000000000000000000000000000000000000000..785468f6876b13e6186fb7f79ab82e0e0f479c60 --- /dev/null +++ b/src/tests/p4-fabric-tna/descriptors/service-create-int.json @@ -0,0 +1,53 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "p4-service-int"} + }, + "name": "p4-service-int", + "service_type": "SERVICETYPE_INT", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "1"} + }, + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "2"} + } + ], + "service_config": { + "config_rules": [ + { + "action": "CONFIGACTION_SET", + "custom": { + "resource_key": "/settings", + "resource_value": { + "switch_info": { + "sw1": { + "arch": "v1model", + "dpid": 1, + "mac": "ee:ee:8c:6c:f3:2c", + "ip": "192.168.5.139", + "int_port": { + "port_id": 3, + "port_type": "host" + } + } + }, + "int_collector_info": { + "mac": "46:e4:58:c6:74:53", + "ip": "192.168.5.137", + "port": 32766, + "vlan_id": 4094 + } + } + } + } + ] + }, + "service_constraints": [] + } + ] +} diff --git a/src/tests/p4-fabric-tna/descriptors/service-create-l2-simple.json b/src/tests/p4-fabric-tna/descriptors/service-create-l2-simple.json new file mode 100644 index 0000000000000000000000000000000000000000..05f53d7a651ca59f71e2b768a99932cf55490957 --- /dev/null +++ b/src/tests/p4-fabric-tna/descriptors/service-create-l2-simple.json @@ -0,0 +1,63 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "p4-service-l2"} + }, + "name": "p4-service-l2", + "service_type": "SERVICETYPE_L2NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "1"} + }, + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "2"} + } + ], + "service_config": { + "config_rules": [ + { + "action": "CONFIGACTION_SET", + "custom": { + "resource_key": "/settings", + "resource_value": { + "switch_info": { + "sw1": { + "arch": "v1model", + "dpid": 1, + "port_list": [ + { + "port_id": 1, + "port_type": "host", + "vlan_id": 4094 + }, + { + "port_id": 2, + "port_type": "host", + "vlan_id": 4094 + } + ], + "fwd_list": [ + { + "port_id": 1, + "host_mac": "00:00:00:00:00:01" + }, + { + "port_id": 2, + "host_mac": "00:00:00:00:00:02" + } + ] + } + } + } + } + } + ] + }, + "service_constraints": [] + } + ] +} diff --git a/src/tests/p4-fabric-tna/descriptors/service-create-l3.json b/src/tests/p4-fabric-tna/descriptors/service-create-l3.json new file mode 100644 index 0000000000000000000000000000000000000000..4a016f25502881f4f23a71da532aed8455ac77a7 --- /dev/null +++ b/src/tests/p4-fabric-tna/descriptors/service-create-l3.json @@ -0,0 +1,67 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "p4-service-l3"} + }, + "name": "p4-service-l3", + "service_type": "SERVICETYPE_L3NM", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "1"} + }, + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "2"} + } + ], + "service_config": { + "config_rules": [ + { + 
"action": "CONFIGACTION_SET", + "custom": { + "resource_key": "/settings", + "resource_value": { + "switch_info": { + "sw1": { + "arch": "v1model", + "dpid": 1, + "port_list": [ + { + "port_id": 1, + "port_type": "host" + }, + { + "port_id": 2, + "port_type": "host" + } + ], + "routing_list": [ + { + "port_id": 1, + "ipv4_dst": "10.0.0.1", + "ipv4_prefix_len": 32, + "mac_src": "00:00:00:00:00:02", + "mac_dst": "00:00:00:00:00:01" + }, + { + "port_id": 2, + "ipv4_dst": "10.0.0.2", + "ipv4_prefix_len": 32, + "mac_src": "00:00:00:00:00:01", + "mac_dst": "00:00:00:00:00:02" + } + ] + } + } + } + } + } + ] + }, + "service_constraints": [] + } + ] +} diff --git a/src/tests/p4-fabric-tna/descriptors/topology.json b/src/tests/p4-fabric-tna/descriptors/topology.json new file mode 100644 index 0000000000000000000000000000000000000000..908faaa7d07eecb04e6b301e4e2bb9a388e0e992 --- /dev/null +++ b/src/tests/p4-fabric-tna/descriptors/topology.json @@ -0,0 +1,114 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}} + } + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, + "name": "tfs-sdn-controller", + "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L2VPN"], + "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "192.168.5.137"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8003"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "endpoints": [{"uuid": "mgmt", "name": "mgmt", "type": "mgmt-int"}], + "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "edge-net"}}, + "device_type": "network", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": { + "config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "endpoints": [{"uuid": "eth1", "name": "eth1", "type": "copper"}] + }}} + ] + } + }, + { + "device_id": {"device_uuid": {"uuid": "corporate-net"}}, + "device_type": "network", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": { + "config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "endpoints": [{"uuid": "eth1", "name": "eth1", "type": "copper"}] + }}} + ] + } + }, + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "device_type": "p4-switch", + "device_drivers": ["DEVICEDRIVER_P4"], + "device_operational_status": "DEVICEOPERATIONALSTATUS_DISABLED", + "name": "sw1", + "device_config": { + "config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "192.168.5.139"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": 
"50001"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "id": 1, + "name": "sw1", + "vendor": "Open Networking Foundation", + "hw_ver": "BMv2 simple_switch", + "sw_ver": "Stratum", + "timeout": 60, + "p4bin": "/root/p4/bmv2.json", + "p4info": "/root/p4/p4info.txt", + "endpoints": [ + {"uuid": "1", "name": "1", "type": "port-dataplane"}, + {"uuid": "2", "name": "2", "type": "port-dataplane"}, + {"uuid": "3", "name": "3", "type": "port-int"} + ] + }}} + ] + } + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "sw1/1==edge-net/eth1"}}, "link_type": "LINKTYPE_VIRTUAL", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "edge-net"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "edge-net/eth1==sw1/1"}}, "link_type": "LINKTYPE_VIRTUAL", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "edge-net"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw1/2==corporate-net/eth1"}}, "link_type": "LINKTYPE_VIRTUAL", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "corporate-net"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "corporate-net/eth1==sw1/2"}}, "link_type": "LINKTYPE_VIRTUAL", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "corporate-net"}}, "endpoint_uuid": {"uuid": "eth1"}}, + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw1/3==tfs-sdn-controller/mgmt"}}, "link_type": "LINKTYPE_MANAGEMENT", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, "endpoint_uuid": {"uuid": "mgmt"}} + ] + } + ] +} diff --git a/src/tests/p4-int-routing-acl/p4src/README.md b/src/tests/p4-fabric-tna/p4src/README.md similarity index 100% rename from src/tests/p4-int-routing-acl/p4src/README.md rename to src/tests/p4-fabric-tna/p4src/README.md diff --git a/src/tests/p4-int-routing-acl/p4src/_pp.p4 b/src/tests/p4-fabric-tna/p4src/_pp.p4 similarity index 100% rename from src/tests/p4-int-routing-acl/p4src/_pp.p4 rename to src/tests/p4-fabric-tna/p4src/_pp.p4 diff --git a/src/tests/p4-int-routing-acl/p4src/bmv2.json b/src/tests/p4-fabric-tna/p4src/bmv2.json similarity index 100% rename from src/tests/p4-int-routing-acl/p4src/bmv2.json rename to src/tests/p4-fabric-tna/p4src/bmv2.json diff --git a/src/tests/p4-int-routing-acl/p4src/p4info.txt b/src/tests/p4-fabric-tna/p4src/p4info.txt similarity index 100% rename from src/tests/p4-int-routing-acl/p4src/p4info.txt rename to src/tests/p4-fabric-tna/p4src/p4info.txt diff --git a/src/tests/p4-int-routing-acl/run_test_01_bootstrap.sh b/src/tests/p4-fabric-tna/run_test_01_bootstrap.sh similarity index 89% rename from src/tests/p4-int-routing-acl/run_test_01_bootstrap.sh rename to src/tests/p4-fabric-tna/run_test_01_bootstrap.sh index 76469ca55455aec65569a7c104f87e8d6673dec9..81d87476e396130cc90e3f75dc0873874005c3b0 100755 --- a/src/tests/p4-int-routing-acl/run_test_01_bootstrap.sh +++ b/src/tests/p4-fabric-tna/run_test_01_bootstrap.sh @@ -18,4 +18,4 @@ # - tfs_runtime_env_vars.sh source 
tfs_runtime_env_vars.sh -python3 -m pytest --verbose src/tests/p4-int-routing-acl/test_functional_bootstrap.py +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-setup/test_functional_bootstrap.py diff --git a/src/tests/p4-int-routing-acl/run_test_03_rules_deprovision.sh b/src/tests/p4-fabric-tna/run_test_02a_sbi_provision_int_l2_l3_acl.sh similarity index 86% rename from src/tests/p4-int-routing-acl/run_test_03_rules_deprovision.sh rename to src/tests/p4-fabric-tna/run_test_02a_sbi_provision_int_l2_l3_acl.sh index 3a67fad8a04520bc48c666b48d684b5ad7fe3d13..a3ab51c2247a39005cb1502e386e835d7b076157 100755 --- a/src/tests/p4-int-routing-acl/run_test_03_rules_deprovision.sh +++ b/src/tests/p4-fabric-tna/run_test_02a_sbi_provision_int_l2_l3_acl.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -python3 -m pytest --verbose src/tests/p4-int-routing-acl/test_functional_rules_deprovision.py +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_provision.py diff --git a/src/tests/p4-fabric-tna/run_test_02b_sbi_deprovision_int_l2_l3_acl.sh b/src/tests/p4-fabric-tna/run_test_02b_sbi_deprovision_int_l2_l3_acl.sh new file mode 100755 index 0000000000000000000000000000000000000000..49a686638775ee261636f9600c11164e6838d105 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_02b_sbi_deprovision_int_l2_l3_acl.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_deprovision.py diff --git a/src/tests/p4-fabric-tna/run_test_03a_service_provision_l2.sh b/src/tests/p4-fabric-tna/run_test_03a_service_provision_l2.sh new file mode 100755 index 0000000000000000000000000000000000000000..6da5904690aa4da03a744779b113f6eea02759b4 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_03a_service_provision_l2.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l2.py diff --git a/src/tests/p4-fabric-tna/run_test_03b_service_deprovision_l2.sh b/src/tests/p4-fabric-tna/run_test_03b_service_deprovision_l2.sh new file mode 100755 index 0000000000000000000000000000000000000000..fdb30c2fcc56aa4b5e1696beff329284b6a6727f --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_03b_service_deprovision_l2.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l2.py diff --git a/src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh b/src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh new file mode 100755 index 0000000000000000000000000000000000000000..96c629370d4e3c126c04a4ded496f4ba4b6c16f1 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_04a_service_provision_l3.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py diff --git a/src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh b/src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh new file mode 100755 index 0000000000000000000000000000000000000000..fdc1d72ac574aa21590f52b53407f9194a3e4633 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_04b_service_deprovision_l3.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py diff --git a/src/tests/p4-fabric-tna/run_test_05a_service_provision_acl.sh b/src/tests/p4-fabric-tna/run_test_05a_service_provision_acl.sh new file mode 100755 index 0000000000000000000000000000000000000000..2cf94b1bd09a39e7908d94a1b4f0fb4ab51f0ae9 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_05a_service_provision_acl.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_acl.py diff --git a/src/tests/p4-fabric-tna/run_test_05b_service_deprovision_acl.sh b/src/tests/p4-fabric-tna/run_test_05b_service_deprovision_acl.sh new file mode 100755 index 0000000000000000000000000000000000000000..681490896f54e1bfe5ebc8cb3f3c9b60ef47ead0 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_05b_service_deprovision_acl.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_acl.py diff --git a/src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh b/src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh new file mode 100755 index 0000000000000000000000000000000000000000..12bc82352c83b2ffa0e32cf196b9d4bee951bee4 --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_06a_service_provision_int.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py diff --git a/src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh b/src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh new file mode 100755 index 0000000000000000000000000000000000000000..a501de77089d2200d5170ba85b980ee220d58e4f --- /dev/null +++ b/src/tests/p4-fabric-tna/run_test_06b_service_deprovision_int.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +source tfs_runtime_env_vars.sh +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py diff --git a/src/tests/p4-int-routing-acl/run_test_02_rules_provision.sh b/src/tests/p4-fabric-tna/run_test_07_cleanup.sh similarity index 87% rename from src/tests/p4-int-routing-acl/run_test_02_rules_provision.sh rename to src/tests/p4-fabric-tna/run_test_07_cleanup.sh index 6709d66c62cae11ecc1e555bae81c680dfeaafc8..c7b829168e956b2bcc0c2ee1a65cfdda66f20180 100755 --- a/src/tests/p4-int-routing-acl/run_test_02_rules_provision.sh +++ b/src/tests/p4-fabric-tna/run_test_07_cleanup.sh @@ -14,4 +14,4 @@ # limitations under the License. source tfs_runtime_env_vars.sh -python3 -m pytest --verbose src/tests/p4-int-routing-acl/test_functional_rules_provision.py +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-setup/test_functional_cleanup.py diff --git a/src/tests/p4-int-routing-acl/run_test_04_cleanup.sh b/src/tests/p4-fabric-tna/run_test_08_purge.sh similarity index 88% rename from src/tests/p4-int-routing-acl/run_test_04_cleanup.sh rename to src/tests/p4-fabric-tna/run_test_08_purge.sh index 917a2af2dab0ab1b9d0d05ad272c6494486d9580..9aef079f1919a8131e016f19a0b3064d6836e7c6 100755 --- a/src/tests/p4-int-routing-acl/run_test_04_cleanup.sh +++ b/src/tests/p4-fabric-tna/run_test_08_purge.sh @@ -14,4 +14,4 @@ # limitations under the License. 
source tfs_runtime_env_vars.sh -python3 -m pytest --verbose src/tests/p4-int-routing-acl/test_functional_cleanup.py +python3 -m pytest --verbose src/tests/p4-fabric-tna/tests-setup/test_functional_purge.py diff --git a/src/tests/p4-int-routing-acl/setup.sh b/src/tests/p4-fabric-tna/setup.sh similarity index 80% rename from src/tests/p4-int-routing-acl/setup.sh rename to src/tests/p4-fabric-tna/setup.sh index c771642769fe528fe1179909ab0b8edb768f7264..9418ac3d7132eca5d08db44d73441922aa2be228 100755 --- a/src/tests/p4-int-routing-acl/setup.sh +++ b/src/tests/p4-fabric-tna/setup.sh @@ -18,5 +18,5 @@ export POD_NAME=$(kubectl get pods -n=tfs | grep device | awk '{print $1}') kubectl exec ${POD_NAME} -n=tfs -c=server -- mkdir -p /root/p4 -kubectl cp src/tests/p4-int-routing-acl/p4src/p4info.txt tfs/${POD_NAME}:/root/p4 -c=server -kubectl cp src/tests/p4-int-routing-acl/p4src/bmv2.json tfs/${POD_NAME}:/root/p4 -c=server +kubectl cp src/tests/p4-fabric-tna/p4src/p4info.txt tfs/${POD_NAME}:/root/p4 -c=server +kubectl cp src/tests/p4-fabric-tna/p4src/bmv2.json tfs/${POD_NAME}:/root/p4 -c=server diff --git a/src/tests/p4-int-routing-acl/test_functional_rules_deprovision.py b/src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_deprovision.py similarity index 99% rename from src/tests/p4-int-routing-acl/test_functional_rules_deprovision.py rename to src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_deprovision.py index 2d54ae9088600381a000722608bd39eb49483a03..6d5f7dfd2b2207b1a98bcc516259f841285fdb9d 100644 --- a/src/tests/p4-int-routing-acl/test_functional_rules_deprovision.py +++ b/src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_deprovision.py @@ -18,7 +18,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from tests.Fixtures import context_client, device_client # pylint: disable=unused-import -from test_common import * +from tests.tools.test_tools_p4 import * LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) diff --git a/src/tests/p4-int-routing-acl/test_functional_rules_provision.py b/src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_provision.py similarity index 99% rename from src/tests/p4-int-routing-acl/test_functional_rules_provision.py rename to src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_provision.py index 86a82d2129e495f3c3be9f9ea7b67b24d27a8db7..49d9aba4d504477d1079aa55cefc607368ee79b8 100644 --- a/src/tests/p4-int-routing-acl/test_functional_rules_provision.py +++ b/src/tests/p4-fabric-tna/tests-sbi/test_functional_sbi_rules_provision.py @@ -18,7 +18,7 @@ from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from tests.Fixtures import context_client, device_client # pylint: disable=unused-import -from test_common import * +from tests.tools.test_tools_p4 import * LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_acl.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_acl.py new file mode 100644 index 0000000000000000000000000000000000000000..fcecbd2c7ce5fdf266e3524a98b07e4cf5bbbb89 --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_acl.py @@ -0,0 +1,78 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) 
(https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_deletion_acl( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before_deletion = get_number_of_rules(response.devices) + + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before_deletion = len(response.services) + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_ACL) + + for service in response.services: + # Ignore services of other types + if service.service_type != ServiceTypeEnum.SERVICETYPE_ACL: + continue + + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + # Delete ACL service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after_deletion = len(response.services) + assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one new service must be deleted" + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after_deletion = get_number_of_rules(response.devices) + + rules_diff = p4_rules_before_deletion - p4_rules_after_deletion + + assert p4_rules_after_deletion < p4_rules_before_deletion, "ACL service must contain some rules" + assert rules_diff == P4_DEV_NB * ACL_RULES, "ACL service must contain {} rules per device".format(ACL_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py new file mode 100644 index 
0000000000000000000000000000000000000000..f29f6b17c92bc851e9ee2dae03fb2e040aba409f --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_int.py @@ -0,0 +1,78 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_deletion_int( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before_deletion = get_number_of_rules(response.devices) + + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before_deletion = len(response.services) + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_INT) + + for service in response.services: + # Ignore services of other types + if service.service_type != ServiceTypeEnum.SERVICETYPE_INT: + continue + + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + # Delete INT service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after_deletion = len(response.services) + assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one new service must be deleted" + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after_deletion = get_number_of_rules(response.devices) + + rules_diff = p4_rules_before_deletion - p4_rules_after_deletion + + assert p4_rules_after_deletion < p4_rules_before_deletion, "INT service must contain some rules" + assert rules_diff == P4_DEV_NB * INT_RULES, "INT service must contain {} rules per 
device".format(INT_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l2.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l2.py new file mode 100644 index 0000000000000000000000000000000000000000..87ef21285fe3c33944e850d689f341118b08976b --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l2.py @@ -0,0 +1,78 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_deletion_l2( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before_deletion = get_number_of_rules(response.devices) + + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before_deletion = len(response.services) + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L2NM) + + for service in response.services: + # Ignore services of other types + if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM: + continue + + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + # Delete L2 service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after_deletion = len(response.services) + assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one new service must be deleted" + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after_deletion = get_number_of_rules(response.devices) + + rules_diff = 
p4_rules_before_deletion - p4_rules_after_deletion + + assert p4_rules_after_deletion < p4_rules_before_deletion, "L2 service must contain some rules" + assert rules_diff == P4_DEV_NB * L2_RULES, "L2 service must contain {} rules per device".format(L2_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py new file mode 100644 index 0000000000000000000000000000000000000000..d349a08747802746a1f9d04d53a165e4359b70ce --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_deprovision_l3.py @@ -0,0 +1,78 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_deletion_l3( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before_deletion = get_number_of_rules(response.devices) + + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before_deletion = len(response.services) + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L3NM) + + for service in response.services: + # Ignore services of other types + if service.service_type != ServiceTypeEnum.SERVICETYPE_L3NM: + continue + + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + # Delete L3 service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after_deletion = len(response.services) + assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly 
one new service must be deleted" + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after_deletion = get_number_of_rules(response.devices) + + rules_diff = p4_rules_before_deletion - p4_rules_after_deletion + + assert p4_rules_after_deletion < p4_rules_before_deletion, "L3 service must contain some rules" + assert rules_diff == P4_DEV_NB * L3_RULES, "L3 service must contain {} rules per device".format(L3_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_acl.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_acl.py new file mode 100644 index 0000000000000000000000000000000000000000..58de046b4171f12ccfc39d078e66cf5ad0670d2a --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_acl.py @@ -0,0 +1,73 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_creation_acl( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before = len(response.services) + + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before = get_number_of_rules(response.devices) + + # Load service + descriptor_loader = DescriptorLoader( + descriptors_file=DESC_FILE_SERVICE_CREATE_ACL, + context_client=context_client, device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after = len(response.services) + assert services_nb_after == services_nb_before + 1, 
"Exactly one new service must be in place" + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_ACL) + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after = get_number_of_rules(response.devices) + + rules_diff = p4_rules_after - p4_rules_before + + assert p4_rules_after > p4_rules_before, "ACL service must install some rules" + assert rules_diff == P4_DEV_NB * ACL_RULES, "ACL service must install {} rules per device".format(ACL_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py new file mode 100644 index 0000000000000000000000000000000000000000..7a875c66abae8139e4e7bc29451dab01490f4599 --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_int.py @@ -0,0 +1,73 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_creation_int( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before = len(response.services) + + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before = get_number_of_rules(response.devices) + + # Load service + descriptor_loader = DescriptorLoader( + descriptors_file=DESC_FILE_SERVICE_CREATE_INT, + context_client=context_client, device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after = len(response.services) + assert 
services_nb_after == services_nb_before + 1, "Exactly one new service must be in place" + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_INT) + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after = get_number_of_rules(response.devices) + + rules_diff = p4_rules_after - p4_rules_before + + assert p4_rules_after > p4_rules_before, "INT service must install some rules" + assert rules_diff == P4_DEV_NB * INT_RULES, "INT service must install {} rules per device".format(INT_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l2.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l2.py new file mode 100644 index 0000000000000000000000000000000000000000..a42c05546659c809cfa94049255296beecd7069a --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l2.py @@ -0,0 +1,73 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_creation_l2( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before = len(response.services) + + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before = get_number_of_rules(response.devices) + + # Load service + descriptor_loader = DescriptorLoader( + descriptors_file=DESC_FILE_SERVICE_CREATE_L2_SIMPLE, + context_client=context_client, device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # Get an updated view of the services + response = 
context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after = len(response.services) + assert services_nb_after == services_nb_before + 1, "Exactly one new service must be in place" + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L2NM) + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after = get_number_of_rules(response.devices) + + rules_diff = p4_rules_after - p4_rules_before + + assert p4_rules_after > p4_rules_before, "L2 service must install some rules" + assert rules_diff == P4_DEV_NB * L2_RULES, "L2 service must install {} rules per device".format(L2_RULES) diff --git a/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py new file mode 100644 index 0000000000000000000000000000000000000000..9c0009b14472ddf055d41a8bf4923c64a9f5c151 --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-service/test_functional_service_provision_l3.py @@ -0,0 +1,73 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_service_creation_l3( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before = len(response.services) + + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before = get_number_of_rules(response.devices) + + # Load service + descriptor_loader = DescriptorLoader( + descriptors_file=DESC_FILE_SERVICE_CREATE_L3, + context_client=context_client, device_client=device_client, service_client=service_client + ) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + + # 
Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after = len(response.services) + assert services_nb_after == services_nb_before + 1, "Exactly one new service must be in place" + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L3NM) + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after = get_number_of_rules(response.devices) + + rules_diff = p4_rules_after - p4_rules_before + + assert p4_rules_after > p4_rules_before, "L3 service must install some rules" + assert rules_diff == P4_DEV_NB * L3_RULES, "L3 service must install {} rules per device".format(L3_RULES) diff --git a/src/tests/p4-int-routing-acl/test_functional_bootstrap.py b/src/tests/p4-fabric-tna/tests-setup/test_functional_bootstrap.py similarity index 98% rename from src/tests/p4-int-routing-acl/test_functional_bootstrap.py rename to src/tests/p4-fabric-tna/tests-setup/test_functional_bootstrap.py index b5b72cc331ea1c7bf6e57aefc484532d66cb8504..2f9130ad000f406c7dab6de828c314670ffaa173 100644 --- a/src/tests/p4-int-routing-acl/test_functional_bootstrap.py +++ b/src/tests/p4-fabric-tna/tests-setup/test_functional_bootstrap.py @@ -19,7 +19,7 @@ from common.tools.descriptor.Loader import DescriptorLoader, \ from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from tests.Fixtures import context_client, device_client # pylint: disable=unused-import -from test_common import * +from tests.tools.test_tools_p4 import * LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) diff --git a/src/tests/p4-int-routing-acl/test_functional_cleanup.py b/src/tests/p4-fabric-tna/tests-setup/test_functional_cleanup.py similarity index 92% rename from src/tests/p4-int-routing-acl/test_functional_cleanup.py rename to src/tests/p4-fabric-tna/tests-setup/test_functional_cleanup.py index 60c8684b098aa886fdd62db28a07a2d5c8adc125..4d98c9e0500281c4e8dfd6f43de833c4ad3411fd 100644 --- a/src/tests/p4-int-routing-acl/test_functional_cleanup.py +++ b/src/tests/p4-fabric-tna/tests-setup/test_functional_cleanup.py @@ -12,14 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging, os -from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId +import logging from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario from context.client.ContextClient import ContextClient from device.client.DeviceClient import DeviceClient from tests.Fixtures import context_client, device_client # pylint: disable=unused-import -from test_common import * +from tests.tools.test_tools_p4 import * LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) diff --git a/src/tests/p4-fabric-tna/tests-setup/test_functional_purge.py b/src/tests/p4-fabric-tna/tests-setup/test_functional_purge.py new file mode 100644 index 0000000000000000000000000000000000000000..ba37fbd89c2eea9ab6e3bae84fcfb8669f9a4e78 --- /dev/null +++ b/src/tests/p4-fabric-tna/tests-setup/test_functional_purge.py @@ -0,0 +1,81 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import * + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +def test_clean_services( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + + for service in response.services: + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_type == ServiceTypeEnum.SERVICETYPE_INT + + # Delete service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + +def test_clean_links( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + response = context_client.ListLinks(ADMIN_CONTEXT_ID) + + for link in response.links: + link_id = link.link_id + + # Delete link by its LinkId message + context_client.RemoveLink(link_id) + +def test_clean_devices( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient # pylint: disable=redefined-outer-name +) -> None: + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + for device in response.devices: + device_id = device.device_id + + # Delete device by its DeviceId message + device_client.DeleteDevice(device_id) + +def test_clean_context( + context_client : ContextClient # pylint: disable=redefined-outer-name +) -> None: + # Remove all topologies of the admin context, then the context itself + response = context_client.ListTopologies(ADMIN_CONTEXT_ID) + + for topology in response.topologies: + topology_id = topology.topology_id + response = context_client.RemoveTopology(topology_id) + + response = context_client.RemoveContext(ADMIN_CONTEXT_ID) diff --git a/src/tests/p4-int-routing-acl/topology/README.md b/src/tests/p4-fabric-tna/topology/README.md similarity index 100% rename from src/tests/p4-int-routing-acl/topology/README.md rename to src/tests/p4-fabric-tna/topology/README.md diff --git a/src/tests/p4-int-routing-acl/topology/p4-switch-conf-common.sh b/src/tests/p4-fabric-tna/topology/p4-switch-conf-common.sh similarity index 100% rename from src/tests/p4-int-routing-acl/topology/p4-switch-conf-common.sh
rename to src/tests/p4-fabric-tna/topology/p4-switch-conf-common.sh diff --git a/src/tests/p4-int-routing-acl/topology/p4-switch-setup.sh b/src/tests/p4-fabric-tna/topology/p4-switch-setup.sh similarity index 100% rename from src/tests/p4-int-routing-acl/topology/p4-switch-setup.sh rename to src/tests/p4-fabric-tna/topology/p4-switch-setup.sh diff --git a/src/tests/p4-int-routing-acl/topology/p4-switch-tear-down.sh b/src/tests/p4-fabric-tna/topology/p4-switch-tear-down.sh similarity index 100% rename from src/tests/p4-int-routing-acl/topology/p4-switch-tear-down.sh rename to src/tests/p4-fabric-tna/topology/p4-switch-tear-down.sh diff --git a/src/tests/p4-int-routing-acl/topology/p4-switch-three-port-chassis-config-phy.pb.txt b/src/tests/p4-fabric-tna/topology/p4-switch-three-port-chassis-config-phy.pb.txt similarity index 97% rename from src/tests/p4-int-routing-acl/topology/p4-switch-three-port-chassis-config-phy.pb.txt rename to src/tests/p4-fabric-tna/topology/p4-switch-three-port-chassis-config-phy.pb.txt index 038d3626960e252a318edd5419ed887e2682a4b5..bc13f29d9231823684d103c0ce86378c2d204eaa 100644 --- a/src/tests/p4-int-routing-acl/topology/p4-switch-three-port-chassis-config-phy.pb.txt +++ b/src/tests/p4-fabric-tna/topology/p4-switch-three-port-chassis-config-phy.pb.txt @@ -18,10 +18,11 @@ description: "Chassis configuration for a single Stratum bmv2 switch with 3 ports" chassis { platform: PLT_P4_SOFT_SWITCH - name: "bmv2-switch" + name: "sw1" } nodes { id: 1 + name: "sw1 node 1" slot: 1 index: 1 } diff --git a/src/tests/p4-int-routing-acl/topology/run-stratum.sh b/src/tests/p4-fabric-tna/topology/run-stratum.sh similarity index 100% rename from src/tests/p4-int-routing-acl/topology/run-stratum.sh rename to src/tests/p4-fabric-tna/topology/run-stratum.sh diff --git a/src/tests/p4-fwd-l1/tests/topologies/6switchObjects.py b/src/tests/p4-fwd-l1/tests/topologies/6switchObjects.py index 522066bb0c4c77fa4e949f68226ebea27d262bcb..69a68a5697d80782a0b157116dc3c3dcf77471a7 100644 --- a/src/tests/p4-fwd-l1/tests/topologies/6switchObjects.py +++ b/src/tests/p4-fwd-l1/tests/topologies/6switchObjects.py @@ -312,12 +312,6 @@ LINK_SW5_SW6 = json_link(LINK_SW5_SW6_UUID, [ENDPOINT_ID_SW5_2, E # ----- Service ---------------------------------------------------------------------------------------------------------- -#SERVICE_SW1_UUID = get_service_uuid(ENDPOINT_ID_SW1_1, ENDPOINT_ID_SW1_2) -#SERVICE_SW1 = json_service_p4_planned(SERVICE_SW1_UUID) - -#SERVICE_SW2_UUID = get_service_uuid(ENDPOINT_ID_SW2_1, ENDPOINT_ID_SW2_2) -#SERVICE_SW2 = json_service_p4_planned(SERVICE_SW2_UUID) - SERVICE_SW1_SW6_UUID = get_service_uuid(ENDPOINT_ID_SW1_3, ENDPOINT_ID_SW6_3) SERVICE_SW1_SW6 = json_service_p4_planned(SERVICE_SW1_SW6_UUID) SERVICE_SW1_SW6_ENDPOINT_IDS = [DEVICE_SW1_ENDPOINT_IDS[2], DEVICE_SW6_ENDPOINT_IDS[2]] @@ -345,10 +339,6 @@ LINKS = [ LINK_SW4_SW6, LINK_SW5_SW6 - ] - -#SERVICES = [(SERVICE_SW1, DEVICE_SW1_ENDPOINT_IDS), (SERVICE_SW2, DEVICE_SW2_ENDPOINT_IDS)] - -#SERVICE_SW1_SW2_ENDPOINT_IDS = DEVICE_SW1_ENDPOINT_IDS + DEVICE_SW2_ENDPOINT_IDS + ] -SERVICES = [(SERVICE_SW1_SW6, SERVICE_SW1_SW6_ENDPOINT_IDS)] \ No newline at end of file +SERVICES = [(SERVICE_SW1_SW6, SERVICE_SW1_SW6_ENDPOINT_IDS)] diff --git a/src/tests/p4-int-routing-acl/README.md b/src/tests/p4-int-routing-acl/README.md deleted file mode 100644 index fa935e1b2eae2decb0e852ebd72b752c3d67ca28..0000000000000000000000000000000000000000 --- a/src/tests/p4-int-routing-acl/README.md +++ /dev/null @@ -1,129 +0,0 @@ -# Tests for P4 
routing, ACL, and In-Band Network Telemetry functions - -This directory contains the necessary scripts and configurations to run tests atop a Stratum-based P4 whitebox that performs a set of network functions, including routing, access control list (ACL), and In-Band Network Telemetry (INT). - -## Prerequisites - -You need Python3, which should already be installed while preparing for a TFS build. -Additionally, `pytest` is also mandatory as it is used by our tests below. -Aliasing python with python3 will also help bridging issues between older and newer python versions. - -```shell -alias python='python3' -pip3 install pytest -pip3 install grpclib protobuf -pip3 install grpcio-tools -``` - -The versions used for this test are: -- `protobuf` 3.20.3 -- `grpclib` 0.4.4 -- `grpcio` 1.47.5 -- `grpcio-tools` 1.47.5 - -After the installation of `grpclib`, protoc-gen-grpclib_python binary is in /home/$USER/.local/bin/ -First we copy it to /usr/local/bin/: - -```shell - sudo cp /home/$USER/.local/bin/protoc-gen-grpclib_python /usr/local/bin/ -``` - -Then, we include this path to the PYTHONPATH: -```shell -export PYTHONPATH="${PYTHONPATH}:/usr/local/bin/protoc-gen-grpclib_python" -``` - - -You need to build and deploy a software-based Stratum switch, before being able to use TFS to control it. -To do so, follow the instructions in the `./topology` folder. - -## Steps to setup and run a TFS program atop Stratum - -To conduct this test, follow the steps below. - -### TFS re-deploy - -```shell -cd ~/tfs-ctrl/ -source my_deploy.sh && source tfs_runtime_env_vars.sh -./deploy/all.sh -``` - -### Path setup - -Ensure that `PATH` variable contains the parent project directory, e.g., "home/$USER/tfs-ctrl". - -Ensure that `PYTHONPATH` variable contains the source code directory of TFS, e.g., "home/$USER/tfs-ctrl/src" - -## Topology setup - -In the `./topology/` directory there are scripts that allow to build Stratum on a target machine (e.g., a VM) and then deploy a P4 switch atop this machine. -This test assumes a Stratum P4 switch with 2 network interfaces used as a data plane (routing traffic from one to another) as well as another network interface used to send telemetry information to an external telemetry collector. - -## P4 artifacts - -In the `./p4src/` directory there are compiled P4 artifacts of the pipeline that will be pushed to the P4 switch, along with the P4-runtime definitions. -The `./setup.sh` script copies from this directory. If you need to change the P4 program, make sure to put the compiled artifacts there. - -## Tests - -The following tests are implemented. -For each of these tests, an auxiliary bash script allows to run it with less typing. - -| Test | Bash Runner | Purpose | -| ------------------------------------ | ---------------------------------- | ---------------------------------- | -| - | setup.sh | Copy P4 artifacts into the SBI pod | -| test_functional_bootstrap.py | run_test_01_bootstrap.sh | Connect TFS to the P4 switch | -| test_functional_rules_provision.py | run_test_02_rules_provision.sh | Install rules on the P4 switch | -| test_functional_rules_deprovision.py | run_test_03_rules_deprovision.sh | Uninstall rules from the P4 switch | -| test_functional_cleanup.py | run_test_04_cleanup.sh | Disconnect TFS from the P4 switch | - -Each of the tests above is described in detail below. - -### Step 1: Copy the necessary P4 artifacts into the TFS SBI service pod - -The setup script copies the necessary artifacts to the SBI service pod. 
-It should be run just once, after a fresh install of TFS. -If you `deploy/all.sh` again, you need to repeat this step. - -```shell -cd ~/tfs-ctrl/ -source my_deploy.sh && source tfs_runtime_env_vars.sh -bash src/tests/p4-int-routing-acl/setup.sh -``` - -### Step 2: Bootstrap topology - -The bootstrap script registers the context, topology, links, and devices to TFS. - -```shell -cd ~/tfs-ctrl/ -bash src/tests/p4-int-routing-acl/run_test_01_bootstrap.sh -``` - -### Step 3: Provision rules via the SBI API - -Implement routing, ACL, and INT functions by installing P4 rules to the Stratum switch via the TFS SBI API. - -```shell -cd ~/tfs-ctrl/ -bash src/tests/p4-int-routing-acl/run_test_02_rules_provision.sh -``` - -### Step 4: Deprovision rules via the SBI API - -Deprovision the routing, ACL, and INT network functions by removing the previously installed P4 rules (via the TFS SBI API) from the Stratum switch. - -```shell -cd ~/tfs-ctrl/ -bash src/tests/p4-int-routing-acl/run_test_03_rules_deprovision.sh -``` - -### Step 4: Deprovision topology - -Delete all the objects (context, topology, links, devices) from TFS: - -```shell -cd ~/tfs-ctrl/ -bash src/tests/p4-int-routing-acl/run_test_04_cleanup.sh -``` diff --git a/src/tests/p4-int-routing-acl/descriptors/topology.json b/src/tests/p4-int-routing-acl/descriptors/topology.json deleted file mode 100644 index 3b1f6e410cc5a2adc1c99b6208523fd9a9971fe7..0000000000000000000000000000000000000000 --- a/src/tests/p4-int-routing-acl/descriptors/topology.json +++ /dev/null @@ -1,288 +0,0 @@ -{ - "contexts": [ - { - "context_id": { - "context_uuid": { - "uuid": "admin" - } - } - } - ], - "topologies": [ - { - "topology_id": { - "context_id": { - "context_uuid": { - "uuid": "admin" - } - }, - "topology_uuid": { - "uuid": "admin" - } - } - } - ], - "devices": [ - { - "device_id": { - "device_uuid": { - "uuid": "edge-net" - } - }, - "device_type": "network", - "device_drivers": [ - "DEVICEDRIVER_UNDEFINED" - ], - "device_config": { - "config_rules": [ - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "eth1", - "type": "copper" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "corporate-net" - } - }, - "device_type": "network", - "device_drivers": [ - "DEVICEDRIVER_UNDEFINED" - ], - "device_config": { - "config_rules": [ - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/address", - "resource_value": "127.0.0.1" - } - }, - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/port", - "resource_value": "0" - } - }, - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "endpoints": [ - { - "uuid": "eth1", - "type": "copper" - } - ] - } - } - } - ] - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "p4-sw1" - } - }, - "device_type": "p4-switch", - "device_drivers": [ - "DEVICEDRIVER_P4" - ], - "device_operational_status": "DEVICEOPERATIONALSTATUS_DISABLED", - "name": "p4-sw1", - "device_config": { - "config_rules": [ - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/address", - "resource_value": "10.10.10.120" - } - }, - { - "action": 
"CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/port", - "resource_value": "50001" - } - }, - { - "action": "CONFIGACTION_SET", - "custom": { - "resource_key": "_connect/settings", - "resource_value": { - "id": 1, - "name": "p4-sw1", - "vendor": "Open Networking Foundation", - "hw_ver": "BMv2 simple_switch", - "sw_ver": "Stratum", - "p4bin": "/root/p4/bmv2.json", - "p4info": "/root/p4/p4info.txt", - "timeout": 60, - "endpoints": [ - { - "uuid": "1", - "type": "port" - }, - { - "uuid": "2", - "type": "port" - } - ] - } - } - } - ] - } - } - ], - "links": [ - { - "link_id": { - "link_uuid": { - "uuid": "p4-sw1/1==edge-net/eth1" - } - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "p4-sw1" - } - }, - "endpoint_uuid": { - "uuid": "1" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "edge-net" - } - }, - "endpoint_uuid": { - "uuid": "eth1" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "edge-net/eth1==p4-sw1/1" - } - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "edge-net" - } - }, - "endpoint_uuid": { - "uuid": "eth1" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "p4-sw1" - } - }, - "endpoint_uuid": { - "uuid": "1" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "p4-sw1/2==corporate-net/eth1" - } - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "p4-sw1" - } - }, - "endpoint_uuid": { - "uuid": "2" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "corporate-net" - } - }, - "endpoint_uuid": { - "uuid": "eth1" - } - } - ] - }, - { - "link_id": { - "link_uuid": { - "uuid": "corporate-net/eth1==p4-sw1/2" - } - }, - "link_endpoint_ids": [ - { - "device_id": { - "device_uuid": { - "uuid": "corporate-net" - } - }, - "endpoint_uuid": { - "uuid": "eth1" - } - }, - { - "device_id": { - "device_uuid": { - "uuid": "p4-sw1" - } - }, - "endpoint_uuid": { - "uuid": "2" - } - } - ] - } - ] -} diff --git a/src/tests/p4-int-routing-acl/test_common.py b/src/tests/p4-int-routing-acl/test_common.py deleted file mode 100644 index 8254eddc5bb5f2f2bbf4c0866a9409552872b2c8..0000000000000000000000000000000000000000 --- a/src/tests/p4-int-routing-acl/test_common.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import os -from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum -from common.tools.object_factory.Context import json_context_id - -# Context info -CONTEXT_NAME_P4 = DEFAULT_CONTEXT_NAME -ADMIN_CONTEXT_ID = ContextId(**json_context_id(CONTEXT_NAME_P4)) - -# Device and rule cardinality variables -DEV_NB = 3 -CONNECTION_RULES = 3 -ENDPOINT_RULES = 2 -DATAPLANE_RULES_NB_INT_B1 = 5 -DATAPLANE_RULES_NB_INT_B2 = 6 -DATAPLANE_RULES_NB_INT_B3 = 8 -DATAPLANE_RULES_NB_RT_EDGE = 7 -DATAPLANE_RULES_NB_RT_CORP = 7 -DATAPLANE_RULES_NB_ACL = 1 -DATAPLANE_RULES_NB_TOT = \ - DATAPLANE_RULES_NB_INT_B1 +\ - DATAPLANE_RULES_NB_INT_B2 +\ - DATAPLANE_RULES_NB_INT_B3 +\ - DATAPLANE_RULES_NB_RT_EDGE +\ - DATAPLANE_RULES_NB_RT_CORP +\ - DATAPLANE_RULES_NB_ACL - -# Topology descriptor -DESC_TOPO = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'topology.json' -) - -# Rule insertion descriptors -# The switch cannot digest all rules at once, hence we insert in batches -DESC_FILE_RULES_INSERT_INT_B1 = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-insert-int-b1.json' -) -DESC_FILE_RULES_INSERT_INT_B2 = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-insert-int-b2.json' -) -DESC_FILE_RULES_INSERT_INT_B3 = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-insert-int-b3.json' -) -DESC_FILE_RULES_INSERT_ROUTING_EDGE = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-insert-routing-edge.json' -) -DESC_FILE_RULES_INSERT_ROUTING_CORP = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-insert-routing-corp.json' -) -DESC_FILE_RULES_INSERT_ACL = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-insert-acl.json' -) - -# Rule deletion descriptor -DESC_FILE_RULES_DELETE_ALL = os.path.join( - os.path.dirname( - os.path.abspath(__file__) - ), - 'descriptors', 'rules-remove.json' -) - -def verify_number_of_rules(devices, desired_rules_nb): - # Iterate all devices - for device in devices: - # Skip non-P4 devices - if device.device_type != "p4-switch": continue - - # We want the device to be active - assert \ - device.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED - - # Get the configuration rules of this device - config_rules = device.device_config.config_rules - - # Expected rule cardinality - assert len(config_rules) == desired_rules_nb diff --git a/src/tests/tools/test_tools_p4.py b/src/tests/tools/test_tools_p4.py new file mode 100644 index 0000000000000000000000000000000000000000..c68f3518326d2cae4e0c145b3e8630c2a61f7f39 --- /dev/null +++ b/src/tests/tools/test_tools_p4.py @@ -0,0 +1,172 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum,\ + DeviceDriverEnum, ServiceTypeEnum, ServiceStatusEnum +from common.tools.object_factory.Context import json_context_id + +# Context info +CONTEXT_NAME_P4 = DEFAULT_CONTEXT_NAME +ADMIN_CONTEXT_ID = ContextId(**json_context_id(CONTEXT_NAME_P4)) + +# Device and rule cardinality variables +DEV_NB = 4 +P4_DEV_NB = 1 +CONNECTION_RULES = 3 +ENDPOINT_RULES = 3 +INT_RULES = 19 +L2_RULES = 10 +L3_RULES = 4 +ACL_RULES = 1 + +DATAPLANE_RULES_NB_INT_B1 = 5 +DATAPLANE_RULES_NB_INT_B2 = 6 +DATAPLANE_RULES_NB_INT_B3 = 8 +DATAPLANE_RULES_NB_RT_EDGE = 7 +DATAPLANE_RULES_NB_RT_CORP = 7 +DATAPLANE_RULES_NB_ACL = 1 +DATAPLANE_RULES_NB_TOT = \ + DATAPLANE_RULES_NB_INT_B1 +\ + DATAPLANE_RULES_NB_INT_B2 +\ + DATAPLANE_RULES_NB_INT_B3 +\ + DATAPLANE_RULES_NB_RT_EDGE +\ + DATAPLANE_RULES_NB_RT_CORP +\ + DATAPLANE_RULES_NB_ACL + +# Service-related variables +SVC_NB = 1 +NO_SERVICES = 0 +NO_SLICES = 0 + +TEST_PATH = os.path.join( + os.path.dirname(os.path.dirname( + os.path.abspath(__file__) + )) + '/p4-fabric-tna/descriptors') +assert os.path.exists(TEST_PATH), "Invalid path to P4 SD-Fabric tests" + +# Topology descriptor +DESC_TOPO = os.path.join(TEST_PATH, 'topology.json') +assert os.path.exists(DESC_TOPO), "Invalid path to the SD-Fabric topology descriptor" + +# SBI descriptors +# The switch cannot digest all rules at once, hence we insert in batches +DESC_FILE_RULES_INSERT_INT_B1 = os.path.join(TEST_PATH, 'sbi-rules-insert-int-b1.json') +assert os.path.exists(DESC_FILE_RULES_INSERT_INT_B1),\ + "Invalid path to the SD-Fabric INT SBI descriptor (batch #1)" + +DESC_FILE_RULES_INSERT_INT_B2 = os.path.join(TEST_PATH, 'sbi-rules-insert-int-b2.json') +assert os.path.exists(DESC_FILE_RULES_INSERT_INT_B2),\ + "Invalid path to the SD-Fabric INT SBI descriptor (batch #2)" + +DESC_FILE_RULES_INSERT_INT_B3 = os.path.join(TEST_PATH, 'sbi-rules-insert-int-b3.json') +assert os.path.exists(DESC_FILE_RULES_INSERT_INT_B3),\ + "Invalid path to the SD-Fabric INT SBI descriptor (batch #3)" + +DESC_FILE_RULES_INSERT_ROUTING_EDGE = os.path.join(TEST_PATH, 'sbi-rules-insert-routing-edge.json') +assert os.path.exists(DESC_FILE_RULES_INSERT_ROUTING_EDGE),\ + "Invalid path to the SD-Fabric routing SBI descriptor (domain1-side)" + +DESC_FILE_RULES_INSERT_ROUTING_CORP = os.path.join(TEST_PATH, 'sbi-rules-insert-routing-corp.json') +assert os.path.exists(DESC_FILE_RULES_INSERT_ROUTING_CORP),\ + "Invalid path to the SD-Fabric routing SBI descriptor (domain2-side)" + +DESC_FILE_RULES_INSERT_ACL = os.path.join(TEST_PATH, 'sbi-rules-insert-acl.json') +assert os.path.exists(DESC_FILE_RULES_INSERT_ACL),\ + "Invalid path to the SD-Fabric ACL SBI descriptor" + +DESC_FILE_RULES_DELETE_ALL = os.path.join(TEST_PATH, 'sbi-rules-remove.json') +assert os.path.exists(DESC_FILE_RULES_DELETE_ALL),\ + "Invalid path to the SD-Fabric rule removal SBI descriptor" + +# Service descriptors +DESC_FILE_SERVICE_CREATE_INT = os.path.join(TEST_PATH, 'service-create-int.json') +assert os.path.exists(DESC_FILE_SERVICE_CREATE_INT),\ + "Invalid path to the SD-Fabric INT service descriptor" + +DESC_FILE_SERVICE_CREATE_L2_SIMPLE = os.path.join(TEST_PATH, 'service-create-l2-simple.json') +assert os.path.exists(DESC_FILE_SERVICE_CREATE_L2_SIMPLE),\ + "Invalid path to the SD-Fabric L2 simple service descriptor" + +DESC_FILE_SERVICE_CREATE_L3 = os.path.join(TEST_PATH, 'service-create-l3.json') +assert 
os.path.exists(DESC_FILE_SERVICE_CREATE_L3),\ + "Invalid path to the SD-Fabric L3 service descriptor" + +DESC_FILE_SERVICE_CREATE_ACL = os.path.join(TEST_PATH, 'service-create-acl.json') +assert os.path.exists(DESC_FILE_SERVICE_CREATE_ACL),\ + "Invalid path to the SD-Fabric ACL service descriptor" + +def identify_number_of_p4_devices(devices) -> int: + p4_dev_no = 0 + + # Iterate all devices + for device in devices: + # Skip non-P4 devices + if not DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers: continue + + p4_dev_no += 1 + + return p4_dev_no + +def get_number_of_rules(devices) -> int: + total_rules_no = 0 + + # Iterate all devices + for device in devices: + # Skip non-P4 devices + if not DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers: continue + + # We want the device to be active + assert device.device_operational_status == \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + # Get the configuration rules of this device + config_rules = device.device_config.config_rules + + # Expected rule cardinality + total_rules_no += len(config_rules) + + return total_rules_no + +def verify_number_of_rules(devices, desired_rules_nb : int) -> None: + # Iterate all devices + for device in devices: + # Skip non-P4 devices + if not DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers: continue + + # We want the device to be active + assert device.device_operational_status == \ + DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + # Get the configuration rules of this device + config_rules = device.device_config.config_rules + + # Expected rule cardinality + assert len(config_rules) == desired_rules_nb + +def verify_active_service_type(services, target_service_type : ServiceTypeEnum) -> bool: # type: ignore + # Iterate all services + for service in services: + # Ignore services of other types + if service.service_type != target_service_type: + continue + + service_id = service.service_id + assert service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + assert service.service_config + return True + + return False diff --git a/src/webui/service/static/topology_icons/p4-switch.png b/src/webui/service/static/topology_icons/p4-switch.png index 9afcda1c0e38cb5757574024f1a3f96001b03943..178943d67e0b0a1e501d27b32a5356d8afdd4e56 100644 Binary files a/src/webui/service/static/topology_icons/p4-switch.png and b/src/webui/service/static/topology_icons/p4-switch.png differ
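
Note on the new test helpers: the sketch below shows how the utilities added in src/tests/tools/test_tools_p4.py (identify_number_of_p4_devices, get_number_of_rules, verify_number_of_rules, verify_active_service_type) can be exercised against synthetic protobuf objects. It is a minimal illustration, not part of the patch; the import path, the device/service names, and the rule counts are assumptions made only for this note.

    # Minimal, self-contained sketch (assumed import path and synthetic data).
    from common.proto.context_pb2 import (
        ConfigActionEnum, Device, DeviceDriverEnum, DeviceOperationalStatusEnum,
        Service, ServiceStatusEnum, ServiceTypeEnum)
    from test_tools_p4 import (  # assumed to be importable from src/tests/tools
        get_number_of_rules, identify_number_of_p4_devices,
        verify_active_service_type, verify_number_of_rules)

    # Synthetic, enabled P4 device carrying two SET rules
    device = Device()
    device.device_id.device_uuid.uuid = 'p4-sw1'      # hypothetical device name
    device.device_type = 'p4-switch'
    device.device_drivers.append(DeviceDriverEnum.DEVICEDRIVER_P4)
    device.device_operational_status = \
        DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED
    for key in ('_connect/address', '_connect/port'):
        rule = device.device_config.config_rules.add()
        rule.action = ConfigActionEnum.CONFIGACTION_SET
        rule.custom.resource_key = key

    # Synthetic, active INT service with one config rule
    service = Service()
    service.service_id.service_uuid.uuid = 'int-svc'  # hypothetical service name
    service.service_type = ServiceTypeEnum.SERVICETYPE_INT
    service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_ACTIVE
    svc_rule = service.service_config.config_rules.add()
    svc_rule.action = ConfigActionEnum.CONFIGACTION_SET
    svc_rule.custom.resource_key = '/settings'        # hypothetical resource key

    devices, services = [device], [service]
    assert identify_number_of_p4_devices(devices) == 1
    assert get_number_of_rules(devices) == 2
    verify_number_of_rules(devices, 2)   # asserts exactly 2 rules per P4 device
    assert verify_active_service_type(services, ServiceTypeEnum.SERVICETYPE_INT)

In an actual test run these objects would come from the Context service (e.g. the devices and services listed under ADMIN_CONTEXT_ID) rather than being built by hand, and the expected rule counts would be derived from the CONNECTION_RULES, ENDPOINT_RULES, INT_RULES, L2_RULES, L3_RULES, and ACL_RULES constants defined in the module.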