diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2fe4057332cb87e4014d2a9dd2fef04c9c118c9b..8d55be9421354fc7aa61ef5e6da3d091a5097e52 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -22,36 +22,36 @@ stages: # include the individual .gitlab-ci.yml of each micro-service and tests include: #- local: '/manifests/.gitlab-ci.yml' - - local: '/src/monitoring/.gitlab-ci.yml' + # - local: '/src/monitoring/.gitlab-ci.yml' - local: '/src/nbi/.gitlab-ci.yml' - local: '/src/context/.gitlab-ci.yml' - local: '/src/device/.gitlab-ci.yml' - local: '/src/service/.gitlab-ci.yml' - - local: '/src/dbscanserving/.gitlab-ci.yml' - - local: '/src/opticalattackmitigator/.gitlab-ci.yml' - - local: '/src/opticalattackdetector/.gitlab-ci.yml' - - local: '/src/opticalattackmanager/.gitlab-ci.yml' - - local: '/src/opticalcontroller/.gitlab-ci.yml' - - local: '/src/ztp/.gitlab-ci.yml' - - local: '/src/policy/.gitlab-ci.yml' - - local: '/src/automation/.gitlab-ci.yml' - - local: '/src/forecaster/.gitlab-ci.yml' + # - local: '/src/dbscanserving/.gitlab-ci.yml' + # - local: '/src/opticalattackmitigator/.gitlab-ci.yml' + # - local: '/src/opticalattackdetector/.gitlab-ci.yml' + # - local: '/src/opticalattackmanager/.gitlab-ci.yml' + # - local: '/src/opticalcontroller/.gitlab-ci.yml' + # - local: '/src/ztp/.gitlab-ci.yml' + # - local: '/src/policy/.gitlab-ci.yml' + #- local: '/src/automation/.gitlab-ci.yml' + # - local: '/src/forecaster/.gitlab-ci.yml' #- local: '/src/webui/.gitlab-ci.yml' #- local: '/src/l3_distributedattackdetector/.gitlab-ci.yml' #- local: '/src/l3_centralizedattackdetector/.gitlab-ci.yml' #- local: '/src/l3_attackmitigator/.gitlab-ci.yml' - - local: '/src/slice/.gitlab-ci.yml' + # - local: '/src/slice/.gitlab-ci.yml' #- local: '/src/interdomain/.gitlab-ci.yml' - - local: '/src/pathcomp/.gitlab-ci.yml' + # - local: '/src/pathcomp/.gitlab-ci.yml' #- local: '/src/dlt/.gitlab-ci.yml' - - local: '/src/load_generator/.gitlab-ci.yml' - - local: '/src/bgpls_speaker/.gitlab-ci.yml' - - local: '/src/kpi_manager/.gitlab-ci.yml' - - local: '/src/kpi_value_api/.gitlab-ci.yml' - - local: '/src/kpi_value_writer/.gitlab-ci.yml' - - local: '/src/telemetry/.gitlab-ci.yml' - - local: '/src/analytics/.gitlab-ci.yml' - - local: '/src/qos_profile/.gitlab-ci.yml' + # - local: '/src/load_generator/.gitlab-ci.yml' + # - local: '/src/bgpls_speaker/.gitlab-ci.yml' + # - local: '/src/kpi_manager/.gitlab-ci.yml' + # - local: '/src/kpi_value_api/.gitlab-ci.yml' + # - local: '/src/kpi_value_writer/.gitlab-ci.yml' + # - local: '/src/telemetry/.gitlab-ci.yml' + # - local: '/src/analytics/.gitlab-ci.yml' + # - local: '/src/qos_profile/.gitlab-ci.yml' - local: '/src/vnt_manager/.gitlab-ci.yml' - local: '/src/e2e_orchestrator/.gitlab-ci.yml' diff --git a/manifests/e2e_orchestratorservice.yaml b/manifests/e2e_orchestratorservice.yaml index c4f0f112f8bbc34e6e79f470c3974ec317a85637..8c39f47122325842b444ff3b8be7160fa29006ec 100644 --- a/manifests/e2e_orchestratorservice.yaml +++ b/manifests/e2e_orchestratorservice.yaml @@ -37,10 +37,10 @@ spec: ports: - containerPort: 10050 - containerPort: 9192 - - containerPort: 8761 + - containerPort: 8762 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: WS_IP_HOST value: "nbiservice.tfs-ip.svc.cluster.local" - name: WS_IP_PORT @@ -49,6 +49,10 @@ spec: value: "e2e-orchestratorservice.tfs-e2e.svc.cluster.local" - name: WS_E2E_PORT value: "8762" + - name: EXT_CONTROLLER1_ADD + value: "10.1.1.96" + - name: EXT_CONTROLLER1_PORT + value: "8003" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10050"] @@ -81,5 +85,5 @@ spec: port: 9192 targetPort: 9192 - name: ws - port: 8761 - targetPort: 8761 + port: 8762 + targetPort: 8762 diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index 55accdc44dd6ec34ac379d2e09bb0df77406e3fa..5c954d1bb4d5f0fb53097fe3a8db51fb75bcccfe 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -38,14 +38,14 @@ spec: - containerPort: 8080 - containerPort: 9090 - containerPort: 9192 - - containerPort: 8762 + - containerPort: 8761 env: - name: LOG_LEVEL value: "INFO" - name: IETF_NETWORK_RENDERER value: "LIBYANG" - - name: WS_E2E_PORT - value: "8762" + - name: WS_IP_PORT + value: "8761" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:9090"] @@ -83,7 +83,7 @@ spec: protocol: TCP port: 9192 targetPort: 9192 - - name: ws + - name: websocket protocol: TCP - port: 8762 - targetPort: 8762 + port: 8761 + targetPort: 8761 diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index aa94e4269daae85872573d2dc32a5d56da89673b..72c3015b31844f7bd38e47f7be2ed2691db59adb 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py index deef3769cb64e34ae6c300f562c31983a8807286..673947f0306d28e1c7d98b6993dc7f8164df2bab 100644 --- a/src/context/service/database/Link.py +++ b/src/context/service/database/Link.py @@ -28,6 +28,7 @@ from .models.TopologyModel import TopologyLinkModel, TopologyModel from .uuids.EndPoint import endpoint_get_uuid from .uuids.Link import link_get_uuid from .Events import notify_event_context, notify_event_link, notify_event_topology +from .models.enums.LinkType import grpc_to_enum__link_type_enum LOGGER = logging.getLogger(__name__) @@ -67,8 +68,8 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) raw_link_name = request.name link_name = raw_link_uuid if len(raw_link_name) == 0 else raw_link_name link_uuid = link_get_uuid(request.link_id, link_name=link_name, allow_random=True) - - now = datetime.datetime.utcnow() + link_type = grpc_to_enum__link_type_enum(request.link_type) + now = datetime.datetime.now(datetime.timezone.utc) topology_uuids : Set[str] = set() related_topologies : List[Dict] = list() @@ -117,6 +118,7 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) link_data = [{ 'link_uuid' : link_uuid, 'link_name' : link_name, + 'link_type' : link_type, 'total_capacity_gbps' : total_capacity_gbps, 'used_capacity_gbps' : used_capacity_gbps, 'created_at' : now, @@ -129,6 +131,7 @@ def link_set(db_engine : Engine, messagebroker : MessageBroker, request : Link) index_elements=[LinkModel.link_uuid], set_=dict( link_name = stmt.excluded.link_name, + link_type = stmt.excluded.link_type, total_capacity_gbps = stmt.excluded.total_capacity_gbps, used_capacity_gbps = stmt.excluded.used_capacity_gbps, updated_at = stmt.excluded.updated_at, diff --git a/src/context/service/database/models/LinkModel.py b/src/context/service/database/models/LinkModel.py index 423e39832201cc19d98a106b136fb545f4e24b7d..2de279a6e6386de1fe072e78944438bfc612059d 100644 --- a/src/context/service/database/models/LinkModel.py +++ b/src/context/service/database/models/LinkModel.py @@ -13,17 +13,20 @@ # limitations under the License. import operator -from sqlalchemy import CheckConstraint, Column, DateTime, Float, ForeignKey, Integer, String +from sqlalchemy import CheckConstraint, Column, DateTime, Enum, Float, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict from ._Base import _Base +from common.proto.context_pb2 import LinkTypeEnum +from .enums.LinkType import ORM_LinkTypeEnum class LinkModel(_Base): __tablename__ = 'link' link_uuid = Column(UUID(as_uuid=False), primary_key=True) link_name = Column(String, nullable=False) + link_type = Column(Enum(ORM_LinkTypeEnum), nullable=False) total_capacity_gbps = Column(Float, nullable=True) used_capacity_gbps = Column(Float, nullable=True) created_at = Column(DateTime, nullable=False) @@ -44,11 +47,14 @@ class LinkModel(_Base): result = { 'link_id' : self.dump_id(), 'name' : self.link_name, + 'link_type' : self.link_type.value, 'link_endpoint_ids': [ link_endpoint.endpoint.dump_id() for link_endpoint in sorted(self.link_endpoints, key=operator.attrgetter('position')) ], } + if self.link_type is None: + self.link_type = LinkTypeEnum.LINKTYPE_UNKNOWN if self.total_capacity_gbps is not None: attributes : Dict = result.setdefault('attributes', dict()) attributes.setdefault('total_capacity_gbps', self.total_capacity_gbps) diff --git a/src/context/service/database/models/enums/LinkType.py b/src/context/service/database/models/enums/LinkType.py new file mode 100644 index 0000000000000000000000000000000000000000..1ac1a547fe085c722a97bbdabeae663fdcffdc37 --- /dev/null +++ b/src/context/service/database/models/enums/LinkType.py @@ -0,0 +1,32 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import enum, functools +from common.proto.context_pb2 import LinkTypeEnum +from ._GrpcToEnum import grpc_to_enum + +# IMPORTANT: Entries of enum class ORM_DeviceDriverEnum should be named as in +# the proto files removing the prefixes. For example, proto item +# DeviceDriverEnum.DEVICEDRIVER_OPENCONFIG should be included as +# OPENCONFIG. If item name does not match, automatic mapping of +# proto enums to database enums will fail. +class ORM_LinkTypeEnum(enum.Enum): + UNKNOWN = LinkTypeEnum.LINKTYPE_UNKNOWN + COPPER = LinkTypeEnum.LINKTYPE_COPPER + VIRTUAL_COPPER = LinkTypeEnum.LINKTYPE_VIRTUAL_COPPER + OPTICAL = LinkTypeEnum.LINKTYPE_OPTICAL + VIRTUAL_OPTICAL = LinkTypeEnum.LINKTYPE_VIRTUAL_OPTICAL + +grpc_to_enum__link_type_enum = functools.partial( + grpc_to_enum, LinkTypeEnum, ORM_LinkTypeEnum) diff --git a/src/e2e_orchestrator/Dockerfile b/src/e2e_orchestrator/Dockerfile index f875663c1abbb8aeaadef6504c00fe6c8cbd28de..edfe2dee067a6674b51c884efef2f409f721be13 100644 --- a/src/e2e_orchestrator/Dockerfile +++ b/src/e2e_orchestrator/Dockerfile @@ -79,6 +79,7 @@ RUN python3 -m pip install -r e2e_orchestrator/requirements.txt # Add component files into working directory COPY src/context/__init__.py context/__init__.py COPY src/context/client/. context/client/ +COPY src/context/service/database/uuids/. context/service/database/uuids/ COPY src/service/__init__.py service/__init__.py COPY src/service/client/. service/client/ COPY src/e2e_orchestrator/. e2e_orchestrator/ diff --git a/src/e2e_orchestrator/requirements.in b/src/e2e_orchestrator/requirements.in index 691d14abd1a75b3ea37ef0ee34103fd05a545310..53f9028a7ed13cfc9d8949ef739e2c96af544797 100644 --- a/src/e2e_orchestrator/requirements.in +++ b/src/e2e_orchestrator/requirements.in @@ -14,3 +14,4 @@ networkx websockets==12.0 +requests==2.27.* diff --git a/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py b/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py index 945f3171fbc3949056255a79e366ce17c2fa5c88..47307b12c635514bff61caaafe2188fc2a69fe7a 100644 --- a/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py +++ b/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py @@ -13,6 +13,7 @@ # limitations under the License. import copy +import requests from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest, E2EOrchestratorReply from common.proto.context_pb2 import ( @@ -33,11 +34,12 @@ import networkx as nx from threading import Thread from websockets.sync.client import connect from websockets.sync.server import serve -from common.Constants import DEFAULT_CONTEXT_NAME +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME LOGGER = logging.getLogger(__name__) logging.getLogger("websockets").propagate = True +logging.getLogger("requests.packages.urllib3").propagate = True METRICS_POOL = MetricsPool("E2EOrchestrator", "RPC") @@ -79,7 +81,6 @@ class SubscriptionServer(Thread): LOGGER.debug("Received message from WebSocket: {}".format(message)) except Exception as ex: LOGGER.error('Exception receiving from WebSocket: {}'.format(ex)) - self._events_server() @@ -98,13 +99,13 @@ class SubscriptionServer(Thread): def _event_received(self, connection): - LOGGER.info("EVENT received!") + LOGGER.debug('Event received') for message in connection: message_json = json.loads(message) - # LOGGER.info("message_json: {}".format(message_json)) # Link creation if 'link_id' in message_json: + LOGGER.debug('Link creation') link = Link(**message_json) service = Service() @@ -114,12 +115,12 @@ class SubscriptionServer(Thread): service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED service_client.CreateService(service) - links = context_client.ListLinks(Empty()).links a_device_uuid = device_get_uuid(link.link_endpoint_ids[0].device_id) a_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[0])[2] z_device_uuid = device_get_uuid(link.link_endpoint_ids[1].device_id) z_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[1])[2] + links = context_client.ListLinks(Empty()).links for _link in links: for _endpoint_id in _link.link_endpoint_ids: if _endpoint_id.device_id.device_uuid.uuid == a_device_uuid and \ @@ -130,7 +131,9 @@ class SubscriptionServer(Thread): z_ep_id = _endpoint_id if (not 'a_ep_id' in locals()) or (not 'z_ep_id' in locals()): - error_msg = 'Could not get VNT link endpoints' + error_msg = f'Could not get VNT link endpoints\ + \n\ta_endpoint_uuid= {a_endpoint_uuid}\ + \n\tz_endpoint_uuid= {z_device_uuid}' LOGGER.error(error_msg) connection.send(error_msg) return @@ -138,22 +141,22 @@ class SubscriptionServer(Thread): service.service_endpoint_ids.append(copy.deepcopy(a_ep_id)) service.service_endpoint_ids.append(copy.deepcopy(z_ep_id)) - # service_client.UpdateService(service) + service_client.UpdateService(service) + re_svc = context_client.GetService(service.service_id) connection.send(grpc_message_to_json_string(link)) - # Link removal + context_client.SetLink(link) elif 'link_uuid' in message_json: - LOGGER.info('REMOVING VIRTUAL LINK') + LOGGER.debug('Link removal') link_id = LinkId(**message_json) service_id = ServiceId() service_id.service_uuid.uuid = link_id.link_uuid.uuid service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME - # service_client.DeleteService(service_id) + service_client.DeleteService(service_id) connection.send(grpc_message_to_json_string(link_id)) context_client.RemoveLink(link_id) - # Topology received else: - LOGGER.info('TOPOLOGY') + LOGGER.debug('Topology received') topology_details = TopologyDetails(**message_json) context = Context() @@ -181,10 +184,43 @@ class E2EOrchestratorServiceServicerImpl(E2EOrchestratorServiceServicer): sub_server = SubscriptionServer() sub_server.start() LOGGER.debug("Servicer Created") - + self.retrieve_external_topologies() except Exception as ex: LOGGER.info("Exception!: {}".format(ex)) + def retrieve_external_topologies(self): + i = 1 + while True: + try: + ADD = str(get_setting(f'EXT_CONTROLLER{i}_ADD')) + PORT = str(get_setting(f'EXT_CONTROLLER{i}_PORT')) + except Exception as e: + break + try: + LOGGER.info(f'Retrieving external controller #{i}') + url = f'http://{ADD}:{PORT}/tfs-api/context/{DEFAULT_CONTEXT_NAME}/topology_details/{DEFAULT_TOPOLOGY_NAME}' + LOGGER.info(f'url= {url}') + topo = requests.get(url).json() + LOGGER.info(f'Retrieved external controller #{i}') + except Exception as e: + LOGGER.info(f'Exception retrieven topology from external controler #{i}: {e}') + topology_details = TopologyDetails(**topo) + context = Context() + context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid + context_client.SetContext(context) + + topology = Topology() + topology.topology_id.context_id.CopyFrom(context.context_id) + topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid + context_client.SetTopology(topology) + + for device in topology_details.devices: + context_client.SetDevice(device) + + for link in topology_details.links: + context_client.SetLink(link) + + i+=1 @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) diff --git a/src/e2e_orchestrator/service/__main__.py b/src/e2e_orchestrator/service/__main__.py index 0854aed2de9c748bab7c4d70f35dc6fd3e2ebfd3..4c0a6d471e2b6d7ae87aee666695ebdca6938491 100644 --- a/src/e2e_orchestrator/service/__main__.py +++ b/src/e2e_orchestrator/service/__main__.py @@ -28,7 +28,10 @@ from common.Settings import (ENVVAR_SUFIX_SERVICE_HOST, from .E2EOrchestratorService import E2EOrchestratorService terminate = threading.Event() -LOGGER = None + +LOG_LEVEL = get_log_level() +logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +LOGGER = logging.getLogger(__name__) def signal_handler(signal, frame): # pylint: disable=redefined-outer-name @@ -37,12 +40,6 @@ def signal_handler(signal, frame): # pylint: disable=redefined-outer-name def main(): - global LOGGER # pylint: disable=global-statement - - log_level = get_log_level() - logging.basicConfig(level=log_level) - LOGGER = logging.getLogger(__name__) - signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) diff --git a/src/nbi/service/context_subscription/__init__.py b/src/nbi/service/context_subscription/__init__.py index 758f3d82c538fe364add40cc5119f745aea1dc34..1e88a3cd10afd7ee50130bac1e0be42a795d16ac 100644 --- a/src/nbi/service/context_subscription/__init__.py +++ b/src/nbi/service/context_subscription/__init__.py @@ -34,14 +34,14 @@ vnt_manager_client: VNTManagerClient = VNTManagerClient() context_client: ContextClient = ContextClient() ALL_HOSTS = "0.0.0.0" -WS_E2E_PORT = int(get_setting('WS_E2E_PORT', default='8762')) +WS_IP_PORT = int(get_setting('WS_IP_PORT', default='8761')) LOGGER = logging.getLogger(__name__) def register_context_subscription(): - with serve(subcript_to_vnt_manager, ALL_HOSTS, WS_E2E_PORT, logger=LOGGER) as server: - LOGGER.info("Running subscription server...: {}:{}".format(ALL_HOSTS, str(WS_E2E_PORT))) + with serve(subcript_to_vnt_manager, ALL_HOSTS, WS_IP_PORT, logger=LOGGER) as server: + LOGGER.info("Running subscription server...: {}:{}".format(ALL_HOSTS, str(WS_IP_PORT))) server.serve_forever() LOGGER.info("Exiting subscription server...") diff --git a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py index eaa21352527a95591829e6bad87de8ecef1df521..0b99fba5042aea6d45f38b43ea9d74165425f113 100644 --- a/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py +++ b/src/nbi/service/rest_server/nbi_plugins/tfs_api/Resources.py @@ -177,6 +177,11 @@ class Topology(_Resource): def delete(self, context_uuid : str, topology_uuid : str): return format_grpc_to_json(self.context_client.RemoveTopology(grpc_topology_id(context_uuid, topology_uuid))) +class TopologyDetails(_Resource): + def get(self, context_uuid : str, topology_uuid : str): + return format_grpc_to_json(self.context_client.GetTopologyDetails(grpc_topology_id( + context_uuid, topology_uuid))) + class ServiceIds(_Resource): def get(self, context_uuid : str): return format_grpc_to_json(self.context_client.ListServiceIds(grpc_context_id(context_uuid))) @@ -295,21 +300,31 @@ class Links(_Resource): ] class Link(_Resource): + @staticmethod + def _virtual_link(link): + virtual_types = {LinkTypeEnum.LINKTYPE_VIRTUAL_COPPER, LinkTypeEnum.LINKTYPE_VIRTUAL_OPTICAL} + if link.link_type in virtual_types: + return True + return False + + def get(self, link_uuid : str): return format_grpc_to_json(self.context_client.GetLink(grpc_link_id(link_uuid))) def put(self, link_uuid : str): link_json = request.get_json() link = grpc_link(link_json) - virtual_types = {LinkTypeEnum.LINKTYPE_VIRTUAL_COPPER, LinkTypeEnum.LINKTYPE_VIRTUAL_OPTICAL} if link_uuid != link.link_id.link_uuid.uuid: raise BadRequest('Mismatching link_uuid') - elif link.link_type in virtual_types: + elif self._virtual_link(link): link = grpc_link(link_json) return format_grpc_to_json(self.vntmanager_client.SetVirtualLink(link)) - return format_grpc_to_json(self.context_client.SetLink(grpc_link(link))) + return format_grpc_to_json(self.context_client.SetLink(link)) def delete(self, link_uuid : str): + link = self.context_client.GetLink(grpc_link_id(link_uuid)) + if self._virtual_link(link): + format_grpc_to_json(self.vntmanager_client.RemoveVirtualLink(grpc_link_id(link_uuid))) return format_grpc_to_json(self.context_client.RemoveLink(grpc_link_id(link_uuid))) class ConnectionIds(_Resource): diff --git a/src/nbi/service/rest_server/nbi_plugins/tfs_api/__init__.py b/src/nbi/service/rest_server/nbi_plugins/tfs_api/__init__.py index 304a326481f4713f4b2e4f860fd2d42c25ae656b..944644d5746be099f365b0d1c15d1486bbab9dbb 100644 --- a/src/nbi/service/rest_server/nbi_plugins/tfs_api/__init__.py +++ b/src/nbi/service/rest_server/nbi_plugins/tfs_api/__init__.py @@ -22,7 +22,7 @@ from .Resources import ( PolicyRule, PolicyRuleIds, PolicyRules, Service, ServiceIds, Services, Slice, SliceIds, Slices, - Topologies, Topology, TopologyIds + Topologies, Topology, TopologyIds, TopologyDetails ) URL_PREFIX = '/tfs-api' @@ -30,38 +30,39 @@ URL_PREFIX = '/tfs-api' # Use 'path' type since some identifiers might contain char '/' and Flask is unable to recognize them in 'string' type. RESOURCES = [ # (endpoint_name, resource_class, resource_url) - ('api.context_ids', ContextIds, '/context_ids'), - ('api.contexts', Contexts, '/contexts'), - ('api.dummy_contexts', DummyContexts, '/dummy_contexts'), - ('api.context', Context, '/context/'), + ('api.context_ids', ContextIds, '/context_ids'), + ('api.contexts', Contexts, '/contexts'), + ('api.dummy_contexts', DummyContexts, '/dummy_contexts'), + ('api.context', Context, '/context/'), - ('api.topology_ids', TopologyIds, '/context//topology_ids'), - ('api.topologies', Topologies, '/context//topologies'), - ('api.topology', Topology, '/context//topology/'), + ('api.topology_ids', TopologyIds, '/context//topology_ids'), + ('api.topologies', Topologies, '/context//topologies'), + ('api.topology', Topology, '/context//topology/'), + ('api.topology_details', TopologyDetails, '/context//topology_details/'), - ('api.service_ids', ServiceIds, '/context//service_ids'), - ('api.services', Services, '/context//services'), - ('api.service', Service, '/context//service/'), + ('api.service_ids', ServiceIds, '/context//service_ids'), + ('api.services', Services, '/context//services'), + ('api.service', Service, '/context//service/'), - ('api.slice_ids', SliceIds, '/context//slice_ids'), - ('api.slices', Slices, '/context//slices'), - ('api.slice', Slice, '/context//slice/'), + ('api.slice_ids', SliceIds, '/context//slice_ids'), + ('api.slices', Slices, '/context//slices'), + ('api.slice', Slice, '/context//slice/'), - ('api.device_ids', DeviceIds, '/device_ids'), - ('api.devices', Devices, '/devices'), - ('api.device', Device, '/device/'), + ('api.device_ids', DeviceIds, '/device_ids'), + ('api.devices', Devices, '/devices'), + ('api.device', Device, '/device/'), - ('api.link_ids', LinkIds, '/link_ids'), - ('api.links', Links, '/links'), - ('api.link', Link, '/link/'), + ('api.link_ids', LinkIds, '/link_ids'), + ('api.links', Links, '/links'), + ('api.link', Link, '/link/'), - ('api.connection_ids', ConnectionIds, '/context//service//connection_ids'), - ('api.connections', Connections, '/context//service//connections'), - ('api.connection', Connection, '/connection/'), + ('api.connection_ids', ConnectionIds, '/context//service//connection_ids'), + ('api.connections', Connections, '/context//service//connections'), + ('api.connection', Connection, '/connection/'), - ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'), - ('api.policyrules', PolicyRules, '/policyrules'), - ('api.policyrule', PolicyRule, '/policyrule/'), + ('api.policyrule_ids', PolicyRuleIds, '/policyrule_ids'), + ('api.policyrules', PolicyRules, '/policyrules'), + ('api.policyrule', PolicyRule, '/policyrule/'), ] def register_tfs_api(rest_server : RestServer): diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 9120d475b8b2bb88bfcf7d7da3cadba2bf9931e4..b6f8a7faf89acf25c84326132ff5e965aade61b2 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -384,10 +384,10 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): if "bandwidth" in constraint.custom.constraint_type: bitrate = int(float(constraint.custom.constraint_value)) break - - bitrate = int(float( - service.service_constraints[0].custom.constraint_value - )) + if service.service_constraints: + bitrate = int(float( + service.service_constraints[0].custom.constraint_value + )) if len(service.service_config.config_rules) > 0: c_rules_dict = json.loads( service.service_config.config_rules[0].custom.resource_value) diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index fdc86805ba4824f64844a9b4bc78c3e27b606945..744d539eb0308f63cecf1a4d0d953d21d7da834a 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -14,11 +14,11 @@ # include the individual .gitlab-ci.yml of each end-to-end integration test include: - - local: '/src/tests/ofc22/.gitlab-ci.yml' + # - local: '/src/tests/ofc22/.gitlab-ci.yml' #- local: '/src/tests/oeccpsc22/.gitlab-ci.yml' - - local: '/src/tests/ecoc22/.gitlab-ci.yml' + # - local: '/src/tests/ecoc22/.gitlab-ci.yml' #- local: '/src/tests/nfvsdn22/.gitlab-ci.yml' #- local: '/src/tests/ofc23/.gitlab-ci.yml' - - local: '/src/tests/ofc24/.gitlab-ci.yml' - - local: '/src/tests/eucnc24/.gitlab-ci.yml' - #- local: '/src/tests/ecoc24/.gitlab-ci.yml' + # - local: '/src/tests/ofc24/.gitlab-ci.yml' + # - local: '/src/tests/eucnc24/.gitlab-ci.yml' + - local: '/src/tests/ecoc24/.gitlab-ci.yml' diff --git a/src/tests/ecoc24/.gitlab-ci.yml b/src/tests/ecoc24/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..e1b56eaec22acfb7eb1f2617ccd6988a4d029d09 --- /dev/null +++ b/src/tests/ecoc24/.gitlab-ci.yml @@ -0,0 +1,248 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Build, tag, and push the Docker image to the GitLab Docker registry +build ecoc24: + variables: + TEST_NAME: 'ecoc24' + stage: build + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - docker buildx build -t "${TEST_NAME}:latest" -f ./src/tests/${TEST_NAME}/Dockerfile . + - docker tag "${TEST_NAME}:latest" "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" + - docker push "$CI_REGISTRY_IMAGE/${TEST_NAME}:latest" + after_script: + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi -f + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + - changes: + - src/common/**/*.py + - proto/*.proto + - src/tests/${TEST_NAME}/**/*.{py,in,sh,yml} + - src/tests/${TEST_NAME}/Dockerfile + - .gitlab-ci.yml + +# Deploy TeraFlowSDN and Execute end-2-end test +end2end_test ecoc24: + variables: + TEST_NAME: 'ecoc24' + stage: end2end_test + # Disable to force running it after all other tasks + #needs: + # - build ecoc24 + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + - docker network rm -f na-br + + + - helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || echo "Namespace not found" + - helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || echo "Namespace not found" + - helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || echo "Namespace not found" + - helm3 uninstall --namespace nats nats 2>/dev/null || echo "Namespace not found" + - kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found + - kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found + - echo "HOLA" + script: + # Download Docker image to run the test + - echo "Que tal" + - docker pull "${CI_REGISTRY_IMAGE}/${TEST_NAME}:latest" + + + # Check MicroK8s is ready + - microk8s status --wait-ready + - kubectl get pods --all-namespaces + + + + # Configure TeraFlowSDN deployment + # Uncomment if DEBUG log level is needed for the components + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/sliceservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/e2eorchestratorservice.yaml + #- yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/vntmservice.yaml + + + + # Deploy Optical TeraFlowSDN + - source src/tests/${TEST_NAME}/deploy_specs_opt.sh + + + # Delete secondary ingress controllers + - kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-opt.yaml --ignore-not-found + # Create secondary ingress controllers + - kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-opt.yaml + # Deploy TFS for OPT + - source src/tests/ecoc24/deploy_specs_opt.sh + + # Change the name for the database + - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak + - | + sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_opt_context"/}' manifests/contextservice.yaml + + - ./deploy/crdb.sh + - ./deploy/nats.sh + # - ./deploy/qdb.sh + + + - ./deploy/expose_dashboard.sh + - ./deploy/tfs.sh + - ./deploy/show.sh + + - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml + + - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_opt.sh + + # - cp /var/teraflow/tfs_runtime_env_vars.sh /var/teraflow/tfs_runtime_env_vars_opt.sh + + + + # Deploy IP TeraFlowSDN + # Delete secondary ingress controllers + - kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-ip.yaml --ignore-not-found + + # Create secondary ingress controllers + - kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-ip.yaml + + # Deploy TFS for IP + - source src/tests/ecoc24/deploy_specs_ip.sh + + # Change the name for the database + - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak + - | + sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_ip_context"/}' manifests/contextservice.yaml + + - echo "Sleeping 60" + - sleep 60 + + # - source src/tests/${TEST_NAME}/deploy_specs_ip.sh + - ./deploy/crdb.sh + - ./deploy/nats.sh + # - ./deploy/qdb.sh + - ./deploy/expose_dashboard.sh + - ./deploy/tfs.sh + - ./deploy/show.sh + # - cp /var/teraflow/tfs_runtime_env_vars.sh /var/teraflow/tfs_runtime_env_vars_ip.sh + - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml + - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_ip.sh + - ./src/tests/${TEST_NAME}/subscription_ws_ip.sh + + + + # Deploy E2E TeraFlowSDN + - source src/tests/${TEST_NAME}/deploy_specs_e2e.sh + + + + # Delete secondary ingress controllers + - kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-e2e.yaml --ignore-not-found + + # Create secondary ingress controllers + - kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-e2e.yaml + + # Change the name for the database + - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak + - | + sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml + + # - sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml + + - ./deploy/crdb.sh + - ./deploy/nats.sh + # - ./deploy/qdb.sh + - ./deploy/expose_dashboard.sh + - ./deploy/tfs.sh + - ./deploy/show.sh + # - ./src/tests/${TEST_NAME}/subscription_ws_e2e.sh + # - cp /var/teraflow/tfs_runtime_env_vars.sh /var/teraflow/tfs_runtime_env_vars_e2e.sh + + - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml + + #Configure Subscription WS + - ./src/tests/ecoc24/subscription_ws_e2e.sh + + - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh + + + - echo "Por aqui" + - sleep 600 + + # Run end-to-end tests + #- if docker ps -a | grep ${TEST_NAME}; then docker rm -f ${TEST_NAME}; fi + #- > + # docker run -t --name ${TEST_NAME} --network=host + # --volume "$PWD/tfs_runtime_env_vars.sh:/var/teraflow/tfs_runtime_env_vars.sh" + # --volume "$PWD/src/tests/${TEST_NAME}:/opt/results" + # $CI_REGISTRY_IMAGE/${TEST_NAME}:latest + + after_script: + # Dump TeraFlowSDN component logs + - echo "After" + + - source ./src/tests/${TEST_NAME}/deploy_specs_ip.sh + - kubectl --namespace tfs-ip logs deployment/contextservice -c server + - kubectl --namespace tfs-ip logs deployment/deviceservice -c server + - kubectl --namespace tfs-ip logs deployment/pathcompservice -c frontend + - kubectl --namespace tfs-ip logs deployment/serviceservice -c server + - kubectl --namespace tfs-ip logs deployment/nbiservice -c server + - kubectl --namespace tfs-ip logs deployment/vnt-managerservice -c server + + - source ./src/tests/${TEST_NAME}/deploy_specs_opt.sh + - kubectl --namespace tfs-opt logs deployment/contextservice -c server + - kubectl --namespace tfs-opt logs deployment/deviceservice -c server + - kubectl --namespace tfs-opt logs deployment/pathcompservice -c frontend + - kubectl --namespace tfs-opt logs deployment/serviceservice -c server + - kubectl --namespace tfs-opt logs deployment/nbiservice -c server + + - source ./src/tests/${TEST_NAME}/deploy_specs_e2e.sh + - kubectl --namespace tfs-e2e logs deployment/contextservice -c server + - kubectl --namespace tfs-e2e logs deployment/deviceservice -c server + - kubectl --namespace tfs-e2e logs deployment/pathcompservice -c frontend + - kubectl --namespace tfs-e2e logs deployment/serviceservice -c server + - kubectl --namespace tfs-e2e logs deployment/nbiservice -c server + - kubectl --namespace tfs-e2e logs deployment/e2e-orchestratorservice -c server + + + - if docker ps -a | grep ${TEST_NAME}; then docker rm -f ${TEST_NAME}; fi + + # Dump container status and logs + - docker ps -a + + # Clean old docker images + - docker images --filter="dangling=true" --quiet | xargs -r docker rmi -f + + + - helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || echo "Namespace not found" + - helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || echo "Namespace not found" + - helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || echo "Namespace not found" + - helm3 uninstall --namespace nats nats 2>/dev/null || echo "Namespace not found" + - kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found + - kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found + - echo "Adios" + - sleep 600 + + + #coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/' + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && ($CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "develop" || $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH)' + - if: '$CI_PIPELINE_SOURCE == "push" && $CI_COMMIT_BRANCH == "develop"' + artifacts: + when: always + reports: + junit: ./src/tests/${TEST_NAME}/report_*.xml diff --git a/src/tests/ecoc24/Dockerfile b/src/tests/ecoc24/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..4a2d4e043ae55b4d952a7825ebbaabc969eb28cf --- /dev/null +++ b/src/tests/ecoc24/Dockerfile @@ -0,0 +1,107 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM python:3.9-slim + +# Install dependencies +RUN apt-get --yes --quiet --quiet update && \ + apt-get --yes --quiet --quiet install wget g++ git && \ + rm -rf /var/lib/apt/lists/* + +# Set Python to show logs as they occur +ENV PYTHONUNBUFFERED=0 + +# Download the gRPC health probe +RUN GRPC_HEALTH_PROBE_VERSION=v0.2.0 && \ + wget -qO/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \ + chmod +x /bin/grpc_health_probe + +# Get generic Python packages +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools wheel +RUN python3 -m pip install --upgrade pip-tools + +# Get common Python packages +# Note: this step enables sharing the previous Docker build steps among all the Python components +WORKDIR /var/teraflow +COPY common_requirements.in common_requirements.in +RUN pip-compile --quiet --output-file=common_requirements.txt common_requirements.in +RUN python3 -m pip install -r common_requirements.txt + +# Add common files into working directory +WORKDIR /var/teraflow/common +COPY src/common/. ./ +RUN rm -rf proto + +# Create proto sub-folder, copy .proto files, and generate Python code +RUN mkdir -p /var/teraflow/common/proto +WORKDIR /var/teraflow/common/proto +RUN touch __init__.py +COPY proto/*.proto ./ +RUN python3 -m grpc_tools.protoc -I=. --python_out=. --grpc_python_out=. *.proto +RUN rm *.proto +RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \; + +# Create component sub-folders, get specific Python packages +RUN mkdir -p /var/teraflow/tests/ecoc24 +WORKDIR /var/teraflow/tests/ecoc24 +COPY src/tests/ofc24/requirements.in requirements.in +RUN pip-compile --quiet --output-file=requirements.txt requirements.in +RUN python3 -m pip install -r requirements.txt + +# Add component files into working directory +WORKDIR /var/teraflow +COPY src/__init__.py ./__init__.py +COPY src/common/*.py ./common/ +COPY src/common/tests/. ./common/tests/ +COPY src/common/tools/. ./common/tools/ +COPY src/context/__init__.py context/__init__.py +COPY src/context/client/. context/client/ +COPY src/device/__init__.py device/__init__.py +COPY src/device/client/. device/client/ +COPY src/monitoring/__init__.py monitoring/__init__.py +COPY src/monitoring/client/. monitoring/client/ +COPY src/e2e_orchestrator/__init__.py e2e_orchestrator/__init__.py +COPY src/e2e_orchestrator/client/. e2e_orchestrator/client/ +COPY src/service/__init__.py service/__init__.py +COPY src/service/client/. service/client/ +COPY src/slice/__init__.py slice/__init__.py +COPY src/slice/client/. slice/client/ +COPY src/tests/*.py ./tests/ +COPY src/tests/ecoc24/__init__.py ./tests/ecoc24/__init__.py +COPY src/tests/ecoc24/descriptors/descriptor_ip.json ./tests/ecoc24/descriptors/descriptor_ip.json +COPY src/tests/ecoc24/descriptors/descriptor_opt.json ./tests/ecoc24/descriptors/descriptor_opt.json +COPY src/tests/ecoc24/descriptors/descriptor_e2e.json ./tests/ecoc24/descriptors/descriptor_e2e.json +COPY src/tests/ecoc24/tests/. ./tests/ecoc24/tests/ + +# RUN tee ./run_tests.sh <MG-ON2" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON1" + } + }, + "endpoint_uuid":{ + "uuid":"2" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON2" + } + }, + "endpoint_uuid":{ + "uuid":"1" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"MG-ON2->MG-ON3" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON2" + } + }, + "endpoint_uuid":{ + "uuid":"3" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON3" + } + }, + "endpoint_uuid":{ + "uuid":"2" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"OTP1.1->MG-ON1" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"OTP1.1" + } + }, + "endpoint_uuid":{ + "uuid":"P1" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON1" + } + }, + "endpoint_uuid":{ + "uuid":"OTP1" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"OTP1.2->MG-ON1" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"OTP1.2" + } + }, + "endpoint_uuid":{ + "uuid":"P1" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON1" + } + }, + "endpoint_uuid":{ + "uuid":"OTP2" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"OTP1.3->MG-ON1" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"OTP1.3" + } + }, + "endpoint_uuid":{ + "uuid":"P1" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON1" + } + }, + "endpoint_uuid":{ + "uuid":"OTP3" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"OTP2.1->MG-ON3" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"OTP2.1" + } + }, + "endpoint_uuid":{ + "uuid":"P1" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON3" + } + }, + "endpoint_uuid":{ + "uuid":"OTP1" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"OTP2.2->MG-ON3" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"OTP2.2" + } + }, + "endpoint_uuid":{ + "uuid":"P1" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON3" + } + }, + "endpoint_uuid":{ + "uuid":"OTP2" + } + } + ] + }, + { + "link_id":{ + "link_uuid":{ + "uuid":"OTP2.3->MG-ON3" + } + }, + "link_endpoint_ids":[ + { + "device_id":{ + "device_uuid":{ + "uuid":"OTP2.3" + } + }, + "endpoint_uuid":{ + "uuid":"P1" + } + }, + { + "device_id":{ + "device_uuid":{ + "uuid":"MG-ON3" + } + }, + "endpoint_uuid":{ + "uuid":"OTP3" + } + } + ] + } + ] + } + \ No newline at end of file diff --git a/src/tests/ecoc24/descriptors/emulated/dc-2-dc-service.json b/src/tests/ecoc24/descriptors/emulated/dc-2-dc-service.json deleted file mode 100644 index 44c80ad46f727cbe9af7fd78f73184e756d32a92..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/descriptors/emulated/dc-2-dc-service.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "services": [ - { - "service_id": { - "context_id": { - "context_uuid": { - "uuid": "admin" - } - }, - "service_uuid": { - "uuid": "dc-2-dc-svc" - } - }, - "service_type": 2, - "service_status": {"service_status": 1}, - "service_endpoint_ids": [ - {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}}, - {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}} - ], - "service_constraints": [ - {"sla_capacity": {"capacity_gbps": 10.0}}, - {"sla_latency": {"e2e_latency_ms": 15.2}} - ], - "service_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "/settings", "resource_value": { - "address_families": ["IPV4"], "bgp_as": 65000, "bgp_route_target": "65000:123", - "mtu": 1512, "vlan_id": 300 - }}}, - {"action": 1, "custom": {"resource_key": "/device[PE1]/endpoint[1/1]/settings", "resource_value": { - "route_distinguisher": "65000:123", "router_id": "10.0.0.1", - "address_ip": "3.3.1.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300 - }}}, - {"action": 1, "custom": {"resource_key": "/device[PE2]/endpoint[1/1]/settings", "resource_value": { - "route_distinguisher": "65000:123", "router_id": "10.0.0.2", - "address_ip": "3.3.2.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300 - }}}, - {"action": 1, "custom": {"resource_key": "/device[PE3]/endpoint[1/1]/settings", "resource_value": { - "route_distinguisher": "65000:123", "router_id": "10.0.0.3", - "address_ip": "3.3.3.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300 - }}}, - {"action": 1, "custom": {"resource_key": "/device[PE4]/endpoint[1/1]/settings", "resource_value": { - "route_distinguisher": "65000:123", "router_id": "10.0.0.4", - "address_ip": "3.3.4.1", "address_prefix": 24, "sub_interface_index": 1, "vlan_id": 300 - }}} - ]} - } - ] -} diff --git a/src/tests/ecoc24/descriptors/emulated/descriptor_ip.json b/src/tests/ecoc24/descriptors/emulated/descriptor_ip.json deleted file mode 100644 index 516a8bdebba9b8a7dcdc0a245d52dd17b65cce07..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/descriptors/emulated/descriptor_ip.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "contexts": [ - {"context_id": {"context_uuid": {"uuid": "admin"}}} - ], - "topologies": [ - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} - ], - "devices": [ - { - "device_id": {"device_uuid": {"uuid": "IP1"}}, "device_type": "emu-packet-router", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "copper/internal", "uuid": "CTP1"}, - {"sample_types": [], "type": "copper/internal", "uuid": "CTP2"}, - {"sample_types": [], "type": "copper/internal", "uuid": "CTP3"} - ]}}} - ]} - }, - { - "device_id": {"device_uuid": {"uuid": "IP2"}}, "device_type": "emu-packet-router", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "copper/internal", "uuid": "CTP1"}, - {"sample_types": [], "type": "copper/internal", "uuid": "CTP2"}, - {"sample_types": [], "type": "copper/internal", "uuid": "CTP3"} - ]}}} - ]} - } - ] -} diff --git a/src/tests/ecoc24/descriptors/emulated/link_mapping.json b/src/tests/ecoc24/descriptors/emulated/link_mapping.json deleted file mode 100644 index 9ac5a25994c867bb5387e00363e7b3e0908e030d..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/descriptors/emulated/link_mapping.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "contexts": [ - {"context_id": {"context_uuid": {"uuid": "admin"}}} - ], - "topologies": [ - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} - ], - "links": [ - {"link_id": {"link_uuid": {"uuid": "IP1_CTP1-MGON1_OTP1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "CTP1"}}, - {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "OTP1"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP1_CTP2-MGON1_OTP2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "CTP2"}}, - {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "OTP2"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP1CTP3-MGON1_OTP3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "CTP3"}}, - {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "OTP3"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP2_CTP1-MGON3_OTP1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "CTP1"}}, - {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "OTP1"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP2_CTP2-MGON3_OTP2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "CTP2"}}, - {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "OTP2"}} - ]}, - {"link_id": {"link_uuid": {"uuid": "IP2_CTP3-MGON3_OTP3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "CTP3"}}, - {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "OTP3"}} - ]} - ] -} diff --git a/src/tests/ecoc24/tests/__init__.py b/src/tests/ecoc24/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee6f7071f145e06c3aeaefc09a43ccd88e619e3 --- /dev/null +++ b/src/tests/ecoc24/tests/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/tests/ecoc24/tests/create_service.py b/src/tests/ecoc24/tests/create_service.py new file mode 100644 index 0000000000000000000000000000000000000000..bc24bccd846819d07323a107a956e83718b1ebc7 --- /dev/null +++ b/src/tests/ecoc24/tests/create_service.py @@ -0,0 +1,27 @@ +import requests + +url = "http://localhost:8002/tfs-api/link/CSGW1_xe5-CSGW2_xe5" + +payload = { + "link_id": {"link_uuid": {"uuid": "CSGW1_xe5-CSGW2_xe5"}}, + "link_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "CSGW1"}}, + "endpoint_uuid": {"uuid": "PORT-xe5"} + }, + { + "device_id": {"device_uuid": {"uuid": "CSGW2"}}, + "endpoint_uuid": {"uuid": "PORT-xe5"} + } + ], + "link_type": "LINKTYPE_VIRTUAL_COPPER", + "attributes": {"total_capacity_gbps": 1} +} +headers = { + "cookie": "session%3Aaa82129ced5debbb=eyJjc3JmX3Rva2VuIjoiZGI1ZjY5Yjg0MDgxMjk3YmU3ZTY2MDMxZTljYzdiYTZmMWVjZjc0NCJ9.ZijdlQ.xdcOryRyoRgXCJ2XYwczsHw4yIQ; session%3A53cf1bf28136ee51=eyJjc3JmX3Rva2VuIjoiMDFlNWQwNzUyNDM3MDU1NWZhZjE3MGFiYzg4NGY2YmE3Y2M5MjM4OCJ9.ZikGzQ.KkIdiPPvqaO2pyBi7-mnlTKnmWs; session%3Ae52730446648c30a=eyJjc3JmX3Rva2VuIjoiODI4NTUwOTc4MWMxYzVjNmQ2ZDBiNGViMmU4ZDJmMzYzMzUxY2M2OSJ9.ZyOPuA.LWyhgLGjWOCb1H6wKlsG0evCV-A", + "Content-Type": "application/json" +} + +response = requests.request("PUT", url, json=payload, headers=headers) + +print(response.text) \ No newline at end of file diff --git a/src/tests/ecoc24/tests/delete_service.py b/src/tests/ecoc24/tests/delete_service.py new file mode 100644 index 0000000000000000000000000000000000000000..0281fb2b3a7b822516388692a43c3446e7391103 --- /dev/null +++ b/src/tests/ecoc24/tests/delete_service.py @@ -0,0 +1,10 @@ +import requests + +url = "http://localhost:8002/tfs-api/link/CSGW1_xe5-CSGW2_xe5" + +payload = "" +headers = {"cookie": "session%3Aaa82129ced5debbb=eyJjc3JmX3Rva2VuIjoiZGI1ZjY5Yjg0MDgxMjk3YmU3ZTY2MDMxZTljYzdiYTZmMWVjZjc0NCJ9.ZijdlQ.xdcOryRyoRgXCJ2XYwczsHw4yIQ; session%3A53cf1bf28136ee51=eyJjc3JmX3Rva2VuIjoiMDFlNWQwNzUyNDM3MDU1NWZhZjE3MGFiYzg4NGY2YmE3Y2M5MjM4OCJ9.ZikGzQ.KkIdiPPvqaO2pyBi7-mnlTKnmWs; session%3Ae52730446648c30a=eyJjc3JmX3Rva2VuIjoiODI4NTUwOTc4MWMxYzVjNmQ2ZDBiNGViMmU4ZDJmMzYzMzUxY2M2OSJ9.ZyOPuA.LWyhgLGjWOCb1H6wKlsG0evCV-A"} + +response = requests.request("DELETE", url, data=payload, headers=headers) + +print(response.text) \ No newline at end of file diff --git a/src/tests/ecoc24/tests/test_functional_bootstrap_e2e.py b/src/tests/ecoc24/tests/test_functional_bootstrap_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..cd72f5e945fbd04928723bbfad4e134647f497f2 --- /dev/null +++ b/src/tests/ecoc24/tests/test_functional_bootstrap_e2e.py @@ -0,0 +1,67 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'descriptor_e2e.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_bootstrap( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + """ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 """ + assert 1 == 1 diff --git a/src/tests/ecoc24/tests/test_functional_bootstrap_ip.py b/src/tests/ecoc24/tests/test_functional_bootstrap_ip.py new file mode 100644 index 0000000000000000000000000000000000000000..692cd41e77eed70534d6360a6fd64470a49ed004 --- /dev/null +++ b/src/tests/ecoc24/tests/test_functional_bootstrap_ip.py @@ -0,0 +1,67 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'descriptor_ip.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_bootstrap( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + """ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 """ + assert 1 == 1 diff --git a/src/tests/ecoc24/tests/test_functional_bootstrap_opt.py b/src/tests/ecoc24/tests/test_functional_bootstrap_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..2f400c3461d2ffeff2cbd1a874d38b6a1bcaca3b --- /dev/null +++ b/src/tests/ecoc24/tests/test_functional_bootstrap_opt.py @@ -0,0 +1,67 @@ +# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os, time +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'descriptor_opt.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_bootstrap( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + validate_empty_scenario(context_client) + + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + results = descriptor_loader.process() + check_descriptor_load_results(results, descriptor_loader) + descriptor_loader.validate() + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + +def test_scenario_devices_enabled( + context_client : ContextClient, # pylint: disable=redefined-outer-name +) -> None: + """ + This test validates that the devices are enabled. + """ + """ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + + num_devices = -1 + num_devices_enabled, num_retry = 0, 0 + while (num_devices != num_devices_enabled) and (num_retry < 10): + time.sleep(1.0) + response = context_client.ListDevices(Empty()) + num_devices = len(response.devices) + num_devices_enabled = 0 + for device in response.devices: + if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue + num_devices_enabled += 1 + LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) + num_retry += 1 """ + assert 1 == 1 diff --git a/src/tests/ecoc24/tests/test_functional_cleanup.py b/src/tests/ecoc24/tests/test_functional_cleanup.py new file mode 100644 index 0000000000000000000000000000000000000000..a482c6e46559a97e3d194c6eb9dbc0a092cc39d4 --- /dev/null +++ b/src/tests/ecoc24/tests/test_functional_cleanup.py @@ -0,0 +1,44 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_cleanup( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ecoc24/tests/test_functional_create_service.py b/src/tests/ecoc24/tests/test_functional_create_service.py new file mode 100644 index 0000000000000000000000000000000000000000..d4f21978c057681db82a0ddaaf46560db8915023 --- /dev/null +++ b/src/tests/ecoc24/tests/test_functional_create_service.py @@ -0,0 +1,74 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'virtual_link.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_service_creation_bidir( + context_client : ContextClient, # pylint: disable=redefined-outer-name + # device_client : DeviceClient, # pylint: disable=redefined-outer-name + # service_client : ServiceClient, # pylint: disable=redefined-outer-name +): + # Load descriptors and validate the base scenario + # descriptor_loader = DescriptorLoader( + # descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client, + # service_client=service_client + # ) + # results = descriptor_loader.process() + # check_descriptor_load_results(results, descriptor_loader) + + import create_service + + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 + + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + + if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + assert len(response.connections) == 2 + else: + str_service = grpc_message_to_json_string(service) + raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) diff --git a/src/tests/ecoc24/tests/test_functional_delete_service.py b/src/tests/ecoc24/tests/test_functional_delete_service.py new file mode 100644 index 0000000000000000000000000000000000000000..475c58e61a3b6d41fa9861840431855f347179e8 --- /dev/null +++ b/src/tests/ecoc24/tests/test_functional_delete_service.py @@ -0,0 +1,80 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Set, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_service_removal_bidir( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +): + # Verify the scenario has 1 service and 0 slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 1 + assert len(response.slice_ids) == 0 + + # Check there are no slices + response = context_client.ListSlices(ADMIN_CONTEXT_ID) + LOGGER.warning('Slices[{:d}] = {:s}'.format(len(response.slices), grpc_message_to_json_string(response))) + assert len(response.slices) == 0 + + # Check there is 1 service + response = context_client.ListServices(ADMIN_CONTEXT_ID) + LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + assert len(response.services) == 1 + + context_service_uuids : Set[Tuple[str, str]] = set() + for service in response.services: + service_id = service.service_id + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + response = context_client.ListConnections(service_id) + LOGGER.warning(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + + if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + assert len(response.connections) == 2 + context_uuid = service_id.context_id.context_uuid.uuid + service_uuid = service_id.service_uuid.uuid + context_service_uuids.add((context_uuid, service_uuid)) + else: + str_service = grpc_message_to_json_string(service) + raise Exception('Unexpected ServiceType: {:s}'.format(str_service)) + + # Identify service to delete + assert len(context_service_uuids) == 1 + context_uuid, service_uuid = set(context_service_uuids).pop() + + # Delete Service + # service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + import delete_service + + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 diff --git a/src/vnt_manager/service/VNTManagerServiceServicerImpl.py b/src/vnt_manager/service/VNTManagerServiceServicerImpl.py index 835b658fa946a19bd542b47c80c1c51587eb698d..79ff1b7523a521b98118d588c1ae3e900f61f01b 100644 --- a/src/vnt_manager/service/VNTManagerServiceServicerImpl.py +++ b/src/vnt_manager/service/VNTManagerServiceServicerImpl.py @@ -45,7 +45,7 @@ GET_EVENT_TIMEOUT = 0.5 class VNTMEventDispatcher(threading.Thread): def __init__(self, host, port) -> None: - LOGGER.debug('Creating VTNM connector...') + LOGGER.debug('Creating VNTM connector...') self.host = host self.port = port super().__init__(name='VNTMEventDispatcher', daemon=True) @@ -71,14 +71,12 @@ class VNTMEventDispatcher(threading.Thread): return message def run(self) -> None: - - time.sleep(5) events_collector = EventsCollector( context_client, log_events_received=True, - activate_context_collector = False, + activate_context_collector = True, activate_topology_collector = True, - activate_device_collector = False, - activate_link_collector = False, + activate_device_collector = True, + activate_link_collector = True, activate_service_collector = False, activate_slice_collector = False, activate_connection_collector = False,) @@ -92,7 +90,7 @@ class VNTMEventDispatcher(threading.Thread): LOGGER.info("Connecting to events server...: {}".format(url)) self.websocket = connect(url) except Exception as ex: - LOGGER.error('Error connecting to {}'.format(url)) + LOGGER.error(f'Error connecting to {url}\n\t{ex}') else: LOGGER.info('Connected to {}'.format(url)) context_id = json_context_id(DEFAULT_CONTEXT_NAME) @@ -107,13 +105,10 @@ class VNTMEventDispatcher(threading.Thread): while not self._terminate.is_set(): event = events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT) - LOGGER.info('Event type: {}'.format(event)) if event is None: continue - LOGGER.debug('Received event: {}'.format(event)) + LOGGER.debug('Event type: {}'.format(event)) topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id)) - to_send = grpc_message_to_json_string(topology_details) - self.send_msg(to_send) LOGGER.info('Exiting') @@ -135,6 +130,8 @@ class VNTManagerServiceServicerImpl(VNTManagerServiceServicer): self.event_dispatcher = VNTMEventDispatcher(request.host, int(request.port)) self.host = request.host self.port = request.port + LOGGER.info('sleeping 5...') + time.sleep(5) self.event_dispatcher.start() return reply @@ -158,12 +155,13 @@ class VNTManagerServiceServicerImpl(VNTManagerServiceServicer): link = Link(**message_json) context_client.SetLink(link) except Exception as e: - LOGGER.error('Exception setting virtual link={}\n\t{}'.format(request.link_id.link_uuid.uuid, e)) + LOGGER.error(f'Exception setting virtual link={request.link_id.link_uuid.uuid}\n\t{e}') return request.link_id @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveVirtualLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: try: + LOGGER.debug('Removing virtual link') self.event_dispatcher.send_msg(grpc_message_to_json_string(request)) # deconfigure('CSGW1', 'xe5', 'CSGW2', 'xe5', 'ecoc2024-1') response = self.event_dispatcher.recv_msg()