diff --git a/deploy/kafka.sh b/deploy/kafka.sh index e426069034364d2d7a745b0633d531c7db811176..4f522df1aedad38ea0e2e99c32906f658c5db761 100755 --- a/deploy/kafka.sh +++ b/deploy/kafka.sh @@ -42,7 +42,7 @@ export KFK_REDEPLOY=${KFK_REDEPLOY:-""} mkdir -p ${TMP_MANIFESTS_FOLDER} function kafka_deploy() { - # copy zookeeper and kafka manifest files to temporary manifest location + # copy zookeeper and kafka manifest files to temporary manifest location cp "${KFK_MANIFESTS_PATH}/${KFK_ZOOKEEPER_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" cp "${KFK_MANIFESTS_PATH}/${KFK_MANIFEST}" "${TMP_MANIFESTS_FOLDER}/${KFK_MANIFEST}" @@ -57,11 +57,12 @@ function kafka_deploy() { # Kafka zookeeper service should be deployed before the kafka service kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/${KFK_ZOOKEEPER_MANIFEST}" - KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically - KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') + #KFK_ZOOKEEPER_SERVICE="zookeeper-service" # this command may be replaced with command to extract service name automatically + #KFK_ZOOKEEPER_IP=$(kubectl --namespace ${KFK_NAMESPACE} get service ${KFK_ZOOKEEPER_SERVICE} -o 'jsonpath={.spec.clusterIP}') # Kafka service should be deployed after the zookeeper service - sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + #sed -i "s/<ZOOKEEPER_INTERNAL_IP>/${KFK_ZOOKEEPER_IP}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" + sed -i "s/<KAFKA_NAMESPACE>/${KFK_NAMESPACE}/" "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" # echo ">>> Deploying Apache Kafka Broker" kubectl --namespace ${KFK_NAMESPACE} apply -f "${TMP_MANIFESTS_FOLDER}/$KFK_MANIFEST" diff --git a/ecoc24 b/ecoc24 deleted file mode 120000 index 37c97d3a77727ec098303cc9a5e04d711a30fcec..0000000000000000000000000000000000000000 --- a/ecoc24 +++ /dev/null @@ -1 +0,0 @@ -src/tests/ecoc24/ \ No newline at end of file diff --git a/manifests/e2e_orchestratorservice.yaml b/manifests/e2e_orchestratorservice.yaml index 8c39f47122325842b444ff3b8be7160fa29006ec..c628df3a715beded87a6b57838b7b6404f830cc6 100644 --- a/manifests/e2e_orchestratorservice.yaml +++ b/manifests/e2e_orchestratorservice.yaml @@ -41,18 +41,6 @@ spec: env: - name: LOG_LEVEL value: "DEBUG" - - name: WS_IP_HOST - value: "nbiservice.tfs-ip.svc.cluster.local" - - name: WS_IP_PORT - value: "8761" - - name: WS_E2E_HOST - value: "e2e-orchestratorservice.tfs-e2e.svc.cluster.local" - - name: WS_E2E_PORT - value: "8762" - - name: EXT_CONTROLLER1_ADD - value: "10.1.1.96" - - name: EXT_CONTROLLER1_PORT - value: "8003" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10050"] diff --git a/manifests/kafka/01-zookeeper.yaml b/manifests/kafka/01-zookeeper.yaml index c0e87ae0c6f12ed56702220f9e15fbe90b3b9c31..f2cfb4f384303951983113a32680c0ad8ec65e89 100644 --- a/manifests/kafka/01-zookeeper.yaml +++ b/manifests/kafka/01-zookeeper.yaml @@ -19,14 +19,13 @@ metadata: labels: app: zookeeper-service name: zookeeper-service - namespace: kafka spec: - type: NodePort + type: ClusterIP ports: - name: zookeeper-port port: 2181 - nodePort: 30181 - targetPort: 2181 + #nodePort: 30181 + #targetPort: 2181 selector: app: zookeeper --- @@ -36,7 +35,6 @@ metadata: labels: app: zookeeper name: zookeeper - namespace: kafka spec: replicas: 1 selector: @@ -52,4 +50,4 @@ spec: imagePullPolicy: IfNotPresent name: zookeeper 
ports: - - containerPort: 2181 \ No newline at end of file + - containerPort: 2181 diff --git a/manifests/kafka/02-kafka.yaml b/manifests/kafka/02-kafka.yaml index 8400f5944193458ccdad8be5dbc189f8f40cdd7b..066f0151af73ed911efdc83b627f6d74e6d9e896 100644 --- a/manifests/kafka/02-kafka.yaml +++ b/manifests/kafka/02-kafka.yaml @@ -19,7 +19,6 @@ metadata: labels: app: kafka-broker name: kafka-service - namespace: kafka spec: ports: - port: 9092 @@ -32,7 +31,6 @@ metadata: labels: app: kafka-broker name: kafka-broker - namespace: kafka spec: replicas: 1 selector: @@ -49,11 +47,12 @@ spec: - name: KAFKA_BROKER_ID value: "1" - name: KAFKA_ZOOKEEPER_CONNECT - value: <ZOOKEEPER_INTERNAL_IP>:2181 + #value: <ZOOKEEPER_INTERNAL_IP>:2181 + value: zookeeper-service.<KAFKA_NAMESPACE>.svc.cluster.local:2181 - name: KAFKA_LISTENERS value: PLAINTEXT://:9092 - name: KAFKA_ADVERTISED_LISTENERS - value: PLAINTEXT://kafka-service.kafka.svc.cluster.local:9092 + value: PLAINTEXT://kafka-service.<KAFKA_NAMESPACE>.svc.cluster.local:9092 image: wurstmeister/kafka imagePullPolicy: IfNotPresent name: kafka-broker diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index d2d65c719d23780c31228936d9fb4893a7b247f9..2bf31678fe8f872555a144c740cdb5a0f906fa73 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -39,25 +39,28 @@ spec: #- containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: FLASK_ENV value: "production" # change to "development" if developing - name: IETF_NETWORK_RENDERER value: "LIBYANG" + envFrom: + - secretRef: + name: kfk-kpi-data readinessProbe: httpGet: path: /healthz port: 8080 - initialDelaySeconds: 5 + initialDelaySeconds: 30 # NBI's gunicorn takes 30~40 seconds to bootstrap periodSeconds: 10 - failureThreshold: 3 + failureThreshold: 6 livenessProbe: httpGet: path: /healthz port: 8080 - initialDelaySeconds: 5 + initialDelaySeconds: 30 # NBI's gunicorn takes 30~40 seconds to bootstrap periodSeconds: 10 - failureThreshold: 3 + failureThreshold: 6 resources: requests: cpu: 150m diff --git a/manifests/nginx_ingress_http.yaml b/manifests/nginx_ingress_http.yaml index 5b77ed052f26eb28cb07ebecf70e163aeba9d642..3143a81988547c7eb9c1d4c372e1ce2425ccd725 100644 --- a/manifests/nginx_ingress_http.yaml +++ b/manifests/nginx_ingress_http.yaml @@ -17,12 +17,28 @@ kind: Ingress metadata: name: tfs-ingress annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$2 - nginx.ingress.kubernetes.io/limit-rps: "50" - nginx.ingress.kubernetes.io/limit-connections: "50" - nginx.ingress.kubernetes.io/proxy-connect-timeout: "50" - nginx.ingress.kubernetes.io/proxy-send-timeout: "50" - nginx.ingress.kubernetes.io/proxy-read-timeout: "50" + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + + # Enable websocket services and configure sticky cookies (seems not to work) + #nginx.org/websocket-services: "nbiservice" + #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" + + # Enable sticky sessions (use same backend for all connections + # originated by a specific client, identified through its cookie) + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session" + nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io" + nginx.ingress.kubernetes.io/session-cookie-expires: "3600" + nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true" + + nginx.ingress.kubernetes.io/limit-rps: "50" 
# max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"      # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"  # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"   # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"   # max timeout between two successive read operations
 spec:
   rules:
   - http:
@@ -48,6 +64,13 @@ spec:
             name: nbiservice
             port:
               number: 8080
+      - path: /()(socket.io/.*)
+        pathType: Prefix
+        backend:
+          service:
+            name: nbiservice
+            port:
+              number: 8080
       - path: /()(tfs-api/.*)
         pathType: Prefix
         backend:
diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml
index 0cac6cc18604f346214ebe44e7eb7d2d64763cbd..b6f969bf489bcf8c9f4f67b0a9427172db81596e 100644
--- a/manifests/pathcompservice.yaml
+++ b/manifests/pathcompservice.yaml
@@ -36,7 +36,7 @@ spec:
         - containerPort: 9192
         env:
         - name: LOG_LEVEL
-          value: "INFO"
+          value: "DEBUG"
         - name: ENABLE_FORECASTER
           value: "NO"
         readinessProbe:
diff --git a/manifests/vnt_managerservice.yaml b/manifests/vnt_managerservice.yaml
index 41735a589f9e5b74d089bb9eef0e1ff362a3d12a..6f82a341ce618cff07bb5c3ed0166a6294d79cf0 100644
--- a/manifests/vnt_managerservice.yaml
+++ b/manifests/vnt_managerservice.yaml
@@ -39,11 +39,10 @@ spec:
         - containerPort: 9192
         env:
         - name: LOG_LEVEL
-          value: "INFO"
-        - name: WS_IP_PORT
-          value: "8761"
-        - name: WS_E2E_PORT
-          value: "8762"
+          value: "DEBUG"
+        envFrom:
+        - secretRef:
+            name: kfk-kpi-data
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:10080"]
diff --git a/ofc25 b/ofc25
new file mode 120000
index 0000000000000000000000000000000000000000..346ba6d2c770dfb630d6fb2062ac219c91eeba29
--- /dev/null
+++ b/ofc25
@@ -0,0 +1 @@
+src/tests/ofc25/
\ No newline at end of file
diff --git a/proto/vnt_manager.proto b/proto/vnt_manager.proto
index 6442e7b90e021404a8ff76ef67f4f68c377f49d3..14126528c52c1a0508f3608778532ce1a6c49526 100644
--- a/proto/vnt_manager.proto
+++ b/proto/vnt_manager.proto
@@ -12,26 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-// protocol buffers documentation: https://developers.google.com/protocol-buffers/docs/proto3 syntax = "proto3"; package vnt_manager; import "context.proto"; - service VNTManagerService { - rpc VNTSubscript (VNTSubscriptionRequest) returns (VNTSubscriptionReply) {} - rpc ListVirtualLinkIds (context.Empty) returns (context.LinkIdList) {} - rpc ListVirtualLinks (context.Empty) returns (context.LinkList) {} - rpc GetVirtualLink (context.LinkId) returns (context.Link) {} - rpc SetVirtualLink (context.Link) returns (context.LinkId) {} - rpc RemoveVirtualLink (context.LinkId) returns (context.Empty) {} -} - -message VNTSubscriptionRequest { - string host = 1; - string port = 2; -} - -message VNTSubscriptionReply { - string subscription = 1; + rpc ListVirtualLinkIds(context.Empty ) returns (context.LinkIdList) {} + rpc ListVirtualLinks (context.Empty ) returns (context.LinkList ) {} + rpc GetVirtualLink (context.LinkId) returns (context.Link ) {} + rpc SetVirtualLink (context.Link ) returns (context.LinkId ) {} + rpc RemoveVirtualLink (context.LinkId) returns (context.Empty ) {} } diff --git a/src/common/tools/client/RestClient.py b/src/common/tools/client/RestClient.py new file mode 100644 index 0000000000000000000000000000000000000000..321f14ed440d2a492e871b20980b59958fba1d9d --- /dev/null +++ b/src/common/tools/client/RestClient.py @@ -0,0 +1,162 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
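
RestClient, defined just below, wraps python-requests with optional basic auth, a shared timeout, status-code checking, and JSON decoding. A minimal usage sketch; the address, credentials, and logger wiring here are illustrative, not taken from the codebase:

    import logging
    from common.tools.client.RestClient import RestClient

    logging.basicConfig(level=logging.DEBUG)

    # Hypothetical TFS instance; RestClient works against any JSON/REST API.
    client = RestClient(
        '10.0.0.10', 80, scheme='http', username='admin', password='admin',
        timeout=30, verify_certs=False, logger=logging.getLogger(__name__)
    )
    # Returns the parsed JSON body (or None when empty); raises on unexpected status codes.
    devices = client.get('/tfs-api/devices')
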
+
+import enum, logging, requests
+from requests.auth import HTTPBasicAuth
+from typing import Any, Optional, Set
+
+class RestRequestMethod(enum.Enum):
+    GET    = 'get'
+    POST   = 'post'
+    PUT    = 'put'
+    PATCH  = 'patch'
+    DELETE = 'delete'
+
+EXPECTED_STATUS_CODES : Set[int] = {
+    requests.codes['OK'        ],  # 200 - OK
+    requests.codes['CREATED'   ],  # 201 - Created
+    requests.codes['ACCEPTED'  ],  # 202 - Accepted
+    requests.codes['NO_CONTENT'],  # 204 - No Content
+}
+
+URL_TEMPLATE = '{:s}://{:s}:{:d}/{:s}'
+
+def compose_basic_auth(
+    username : Optional[str] = None, password : Optional[str] = None
+) -> Optional[HTTPBasicAuth]:
+    if username is None or password is None: return None
+    return HTTPBasicAuth(username, password)
+
+class SchemeEnum(enum.Enum):
+    HTTP  = 'http'
+    HTTPS = 'https'
+
+def check_scheme(scheme : str) -> str:
+    str_scheme = str(scheme).lower()
+    enm_scheme = SchemeEnum._value2member_map_[str_scheme]
+    return enm_scheme.value
+
+
+class RestClient:
+    def __init__(
+        self, address : str, port : int, scheme : str = 'http',
+        username : Optional[str] = None, password : Optional[str] = None,
+        timeout : int = 30, verify_certs : bool = True, allow_redirects : bool = True,
+        logger : Optional[logging.Logger] = None
+    ) -> None:
+        self._address         = address
+        self._port            = int(port)
+        self._scheme          = check_scheme(scheme)
+        self._auth            = compose_basic_auth(username=username, password=password)
+        self._timeout         = int(timeout)
+        self._verify_certs    = verify_certs
+        self._allow_redirects = allow_redirects
+        self._logger          = logger
+
+    def _compose_url(self, endpoint : str) -> str:
+        endpoint = endpoint.lstrip('/')
+        return URL_TEMPLATE.format(self._scheme, self._address, self._port, endpoint)
+
+    def _log_msg_request(
+        self, method : RestRequestMethod, request_url : str, body : Optional[Any],
+        log_level : int = logging.INFO
+    ) -> str:
+        msg = 'Request: {:s} {:s}'.format(str(method.value).upper(), str(request_url))
+        if body is not None: msg += ' body={:s}'.format(str(body))
+        if self._logger is not None: self._logger.log(log_level, msg)
+        return msg
+
+    def _log_msg_check_reply(
+        self, method : RestRequestMethod, request_url : str, body : Optional[Any],
+        reply : requests.Response, expected_status_codes : Set[int],
+        log_level : int = logging.INFO
+    ) -> str:
+        msg = 'Reply: {:s}'.format(str(reply.text))
+        if self._logger is not None: self._logger.log(log_level, msg)
+        http_status_code = reply.status_code
+        if http_status_code in expected_status_codes: return msg
+        MSG = 'Request failed. method={:s} url={:s} body={:s} status_code={:s} reply={:s}'
+        msg = MSG.format(
+            str(method.value).upper(), str(request_url), str(body),
+            str(http_status_code), str(reply.text)
+        )
+        if self._logger is not None: self._logger.error(msg)  # logger is optional; guard before logging
+        raise Exception(msg)
+
+    def _do_rest_request(
+        self, method : RestRequestMethod, endpoint : str, body : Optional[Any] = None,
+        expected_status_codes : Set[int] = EXPECTED_STATUS_CODES
+    ) -> Optional[Any]:
+        request_url = self._compose_url(endpoint)
+        self._log_msg_request(method, request_url, body)
+        try:
+            headers = {'accept': 'application/json'}
+            reply = requests.request(
+                method.value, request_url, headers=headers, json=body,
+                auth=self._auth, verify=self._verify_certs, timeout=self._timeout,
+                allow_redirects=self._allow_redirects
+            )
+        except Exception as e:
+            MSG = 'Request failed. method={:s} url={:s} body={:s}'
+            msg = MSG.format(str(method.value).upper(), request_url, str(body))
+            if self._logger is not None: self._logger.exception(msg)  # logger is optional; guard before logging
+            raise Exception(msg) from e
+        self._log_msg_check_reply(method, request_url, body, reply, expected_status_codes)
+        if reply.content and len(reply.content) > 0: return reply.json()
+        return None
+
+    def get(
+        self, endpoint : str,
+        expected_status_codes : Set[int] = EXPECTED_STATUS_CODES
+    ) -> Optional[Any]:
+        return self._do_rest_request(
+            RestRequestMethod.GET, endpoint,
+            expected_status_codes=expected_status_codes
+        )
+
+    def post(
+        self, endpoint : str, body : Optional[Any] = None,
+        expected_status_codes : Set[int] = EXPECTED_STATUS_CODES
+    ) -> Optional[Any]:
+        return self._do_rest_request(
+            RestRequestMethod.POST, endpoint, body=body,
+            expected_status_codes=expected_status_codes
+        )
+
+    def put(
+        self, endpoint : str, body : Optional[Any] = None,
+        expected_status_codes : Set[int] = EXPECTED_STATUS_CODES
+    ) -> Optional[Any]:
+        return self._do_rest_request(
+            RestRequestMethod.PUT, endpoint, body=body,
+            expected_status_codes=expected_status_codes
+        )
+
+    def patch(
+        self, endpoint : str, body : Optional[Any] = None,
+        expected_status_codes : Set[int] = EXPECTED_STATUS_CODES
+    ) -> Optional[Any]:
+        return self._do_rest_request(
+            RestRequestMethod.PATCH, endpoint, body=body,
+            expected_status_codes=expected_status_codes
+        )
+
+    def delete(
+        self, endpoint : str, body : Optional[Any] = None,
+        expected_status_codes : Set[int] = EXPECTED_STATUS_CODES
+    ) -> Optional[Any]:
+        return self._do_rest_request(
+            RestRequestMethod.DELETE, endpoint, body=body,
+            expected_status_codes=expected_status_codes
+        )
diff --git a/src/common/tools/descriptor/Loader.py b/src/common/tools/descriptor/Loader.py
index 4cdac3674b3b2e5598ddd65e25aa5cefc93306a9..59d2db6e702219d67c9c0fb9fbd85d75e8e1e954 100644
--- a/src/common/tools/descriptor/Loader.py
+++ b/src/common/tools/descriptor/Loader.py
@@ -45,12 +45,13 @@
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
 from service.client.ServiceClient import ServiceClient
 from slice.client.SliceClient import SliceClient
+from vnt_manager.client.VNTManagerClient import VNTManagerClient
 from .Tools import (
     format_device_custom_config_rules, format_service_custom_config_rules,
     format_slice_custom_config_rules, get_descriptors_add_contexts,
     get_descriptors_add_services, get_descriptors_add_slices,
     get_descriptors_add_topologies, split_controllers_and_network_devices,
-    split_devices_by_rules
+    split_devices_by_rules, split_links_by_type
 )
 
 LOGGER = logging.getLogger(__name__)
@@ -112,7 +113,8 @@ class DescriptorLoader:
         self, descriptors : Optional[Union[str, Dict]] = None, descriptors_file : Optional[str] = None,
         num_workers : int = 1,
         context_client : Optional[ContextClient] = None, device_client : Optional[DeviceClient] = None,
-        service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None
+        service_client : Optional[ServiceClient] = None, slice_client : Optional[SliceClient] = None,
+        vntm_client : Optional[VNTManagerClient] = None
     ) -> None:
         if (descriptors is None) == (descriptors_file is None):
             # pylint: disable=broad-exception-raised
@@ -190,10 +192,11 @@ class DescriptorLoader:
         self.__services_add = None
         self.__slices_add = None
 
-        self.__ctx_cli = ContextClient() if context_client is None else context_client
-        self.__dev_cli = DeviceClient() if device_client is None else device_client
-        self.__svc_cli = ServiceClient() if service_client is None else service_client
-        self.__slc_cli = SliceClient() if slice_client is None else slice_client
+        self.__ctx_cli = ContextClient()    if context_client is None else context_client
+        self.__dev_cli = DeviceClient()     if device_client  is None else device_client
+        self.__svc_cli = ServiceClient()    if service_client is None else service_client
+        self.__slc_cli = SliceClient()      if slice_client   is None else slice_client
+        self.__vnt_cli = VNTManagerClient() if vntm_client    is None else vntm_client
 
         self.__results : TypeResults = list()
 
@@ -351,22 +354,38 @@ class DescriptorLoader:
 
         controllers_add, network_devices_add = split_controllers_and_network_devices(self.__devices_add)
 
+        typed_links = split_links_by_type(self.__links)
+        typed_normal_links  = typed_links.get('normal',  list())
+        typed_optical_links = typed_links.get('optical', list())
+        typed_optical_links.extend(self.__optical_links)
+        typed_virtual_links = typed_links.get('virtual', list())
+
         self.__ctx_cli.connect()
         self.__dev_cli.connect()
-        self.__svc_cli.connect()
-        self.__slc_cli.connect()
+        if len(self.__services    ) > 0: self.__svc_cli.connect()
+        if len(self.__slices      ) > 0: self.__slc_cli.connect()
+        if len(typed_virtual_links) > 0: self.__vnt_cli.connect()
 
         self._process_descr('context',    'add',    self.__ctx_cli.SetContext,      Context,  self.__contexts_add  )
         self._process_descr('topology',   'add',    self.__ctx_cli.SetTopology,     Topology, self.__topologies_add)
         self._process_descr('controller', 'add',    self.__dev_cli.AddDevice,       Device,   controllers_add      )
         self._process_descr('device',     'add',    self.__dev_cli.AddDevice,       Device,   network_devices_add  )
         self._process_descr('device',     'config', self.__dev_cli.ConfigureDevice, Device,   self.__devices_config)
-        self._process_descr('link',       'add',    self.__ctx_cli.SetLink,         Link,     self.__links         )
-        self._process_descr('link',       'add',    self.__ctx_cli.SetOpticalLink,  OpticalLink, self.__optical_links)
-        self._process_descr('service',    'add',    self.__svc_cli.CreateService,   Service,  self.__services_add  )
-        self._process_descr('service',    'update', self.__svc_cli.UpdateService,   Service,  self.__services      )
-        self._process_descr('slice',      'add',    self.__slc_cli.CreateSlice,     Slice,    self.__slices_add    )
-        self._process_descr('slice',      'update', self.__slc_cli.UpdateSlice,     Slice,    self.__slices        )
+        self._process_descr('link',       'add',    self.__ctx_cli.SetLink,         Link,     typed_normal_links   )
+
+        if len(typed_optical_links) > 0:
+            self._process_descr('link', 'add', self.__ctx_cli.SetOpticalLink, OpticalLink, typed_optical_links)
+
+        if len(typed_virtual_links) > 0:
+            self._process_descr('link', 'add', self.__vnt_cli.SetVirtualLink, Link, typed_virtual_links)
+
+        if len(self.__services) > 0:
+            self._process_descr('service', 'add',    self.__svc_cli.CreateService, Service, self.__services_add)
+            self._process_descr('service', 'update', self.__svc_cli.UpdateService, Service, self.__services    )
+
+        if len(self.__slices) > 0:
+            self._process_descr('slice',   'add',    self.__slc_cli.CreateSlice,   Slice,   self.__slices_add  )
+            self._process_descr('slice',   'update', self.__slc_cli.UpdateSlice,   Slice,   self.__slices      )
 
         # By default the Context component automatically assigns devices and links to topologies based on their
         # endpoints, and assigns topologies, services, and slices to contexts based on their identifiers.
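
Links are now routed by link_type (the split_links_by_type helper is added in Tools.py below): copper/radio/unknown links go to Context.SetLink, fiber links to SetOpticalLink, and virtual links to the VNT manager. A small classification sketch with illustrative link descriptors:

    from common.tools.descriptor.Tools import split_links_by_type

    links = [
        {'link_id': {'link_uuid': {'uuid': 'l1'}}},  # no link_type -> LINKTYPE_UNKNOWN -> 'normal'
        {'link_id': {'link_uuid': {'uuid': 'l2'}}, 'link_type': 'LINKTYPE_FIBER'},    # -> 'optical'
        {'link_id': {'link_uuid': {'uuid': 'l3'}}, 'link_type': 'LINKTYPE_VIRTUAL'},  # -> 'virtual'
    ]
    typed_links = split_links_by_type(links)
    assert [link['link_id']['link_uuid']['uuid'] for link in typed_links['normal' ]] == ['l1']
    assert [link['link_id']['link_uuid']['uuid'] for link in typed_links['optical']] == ['l2']
    assert [link['link_id']['link_uuid']['uuid'] for link in typed_links['virtual']] == ['l3']
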
@@ -467,10 +486,17 @@ class DescriptorLoader: def _unload_normal_mode(self) -> None: # Normal mode: follows the automated workflows in the different components + typed_links = split_links_by_type(self.links) + typed_normal_links = typed_links.get('normal', list()) + typed_optical_links = typed_links.get('optical', list()) + typed_optical_links.extend(self.optical_links) + typed_virtual_links = typed_links.get('virtual', list()) + self.__ctx_cli.connect() self.__dev_cli.connect() - self.__svc_cli.connect() - self.__slc_cli.connect() + if len(self.services ) > 0: self.__svc_cli.connect() + if len(self.slices ) > 0: self.__slc_cli.connect() + if len(typed_virtual_links) > 0: self.__vnt_cli.connect() for _, slice_list in self.slices.items(): for slice_ in slice_list: @@ -480,10 +506,13 @@ class DescriptorLoader: for service in service_list: self.__svc_cli.DeleteService(ServiceId(**service['service_id'])) - for optical_link in self.optical_links: + for virtual_link in typed_virtual_links: + self.__vnt_cli.RemoveVirtualLink(LinkId(**virtual_link['link_id'])) + + for optical_link in typed_optical_links: self.__ctx_cli.DeleteOpticalLink(LinkId(**optical_link['link_id'])) - for link in self.links: + for link in typed_normal_links: self.__ctx_cli.RemoveLink(LinkId(**link['link_id'])) for device in self.devices: diff --git a/src/common/tools/descriptor/Tools.py b/src/common/tools/descriptor/Tools.py index a6e39e95d9f0db4207359f8e37ce23e6a2d513ae..07bc18d31b31ede49bd8f6ecc2e478fdd3dc3024 100644 --- a/src/common/tools/descriptor/Tools.py +++ b/src/common/tools/descriptor/Tools.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections import copy, json from typing import Dict, List, Optional, Tuple, Union from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import DeviceDriverEnum +from common.proto.context_pb2 import DeviceDriverEnum, LinkTypeEnum def get_descriptors_add_contexts(contexts : List[Dict]) -> List[Dict]: contexts_add = copy.deepcopy(contexts) @@ -131,3 +132,30 @@ def split_controllers_and_network_devices(devices : List[Dict]) -> Tuple[List[Di else: network_devices.append(device) return controllers, network_devices + +def link_type_to_str(link_type : Union[int, str]) -> Optional[str]: + if isinstance(link_type, int): return LinkTypeEnum.Name(link_type) + if isinstance(link_type, str): return LinkTypeEnum.Name(LinkTypeEnum.Value(link_type)) + return None + +def split_links_by_type(links : List[Dict]) -> Dict[str, List[Dict]]: + typed_links = collections.defaultdict(list) + for link in links: + link_type = link.get('link_type', LinkTypeEnum.LINKTYPE_UNKNOWN) + str_link_type = link_type_to_str(link_type) + if str_link_type is None: + MSG = 'Unsupported LinkType in Link({:s})' + raise Exception(MSG.format(str(link))) + + link_type = LinkTypeEnum.Value(str_link_type) + if link_type in {LinkTypeEnum.LINKTYPE_UNKNOWN, LinkTypeEnum.LINKTYPE_COPPER, LinkTypeEnum.LINKTYPE_RADIO}: + typed_links['normal'].append(link) + elif link_type in {LinkTypeEnum.LINKTYPE_FIBER}: + typed_links['optical'].append(link) + elif link_type in {LinkTypeEnum.LINKTYPE_VIRTUAL}: + typed_links['virtual'].append(link) + else: + MSG = 'Unsupported LinkType({:s}) in Link({:s})' + raise Exception(MSG.format(str_link_type, str(link))) + + return typed_links diff --git a/src/common/tools/kafka/Variables.py b/src/common/tools/kafka/Variables.py index 
5c7501b6c07e6aaa26569e2817fca374e6b0c12e..515a43f16773510c71c15471736a6a88b81ef856 100644 --- a/src/common/tools/kafka/Variables.py +++ b/src/common/tools/kafka/Variables.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging +import logging, time from enum import Enum from confluent_kafka.admin import AdminClient, NewTopic from common.Settings import get_setting @@ -21,6 +21,12 @@ from common.Settings import get_setting LOGGER = logging.getLogger(__name__) KFK_SERVER_ADDRESS_TEMPLATE = 'kafka-service.{:s}.svc.cluster.local:{:s}' +KAFKA_TOPIC_NUM_PARTITIONS = 1 +KAFKA_TOPIC_REPLICATION_FACTOR = 1 +KAFKA_TOPIC_LIST_TIMEOUT = 5 +TOPIC_CREATE_WAIT_ITERATIONS = 10 +TOPIC_CREATE_WAIT_TIME = 1 + class KafkaConfig(Enum): @staticmethod @@ -35,59 +41,87 @@ class KafkaConfig(Enum): @staticmethod def get_admin_client(): SERVER_ADDRESS = KafkaConfig.get_kafka_address() - ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_ADDRESS }) + ADMIN_CLIENT = AdminClient({'bootstrap.servers': SERVER_ADDRESS}) return ADMIN_CLIENT class KafkaTopic(Enum): # TODO: Later to be populated from ENV variable. - TELEMETRY_REQUEST = 'topic_telemetry_request' - TELEMETRY_RESPONSE = 'topic_telemetry_response' - RAW = 'topic_raw' - LABELED = 'topic_labeled' - VALUE = 'topic_value' - ALARMS = 'topic_alarms' - ANALYTICS_REQUEST = 'topic_analytics_request' - ANALYTICS_RESPONSE = 'topic_analytics_response' + TELEMETRY_REQUEST = 'topic_telemetry_request' + TELEMETRY_RESPONSE = 'topic_telemetry_response' + RAW = 'topic_raw' + LABELED = 'topic_labeled' + VALUE = 'topic_value' + ALARMS = 'topic_alarms' + ANALYTICS_REQUEST = 'topic_analytics_request' + ANALYTICS_RESPONSE = 'topic_analytics_response' + VNTMANAGER_REQUEST = 'topic_vntmanager_request' + VNTMANAGER_RESPONSE = 'topic_vntmanager_response' + NBI_SOCKETIO_WORKERS = 'tfs_nbi_socketio' @staticmethod def create_all_topics() -> bool: - """ + ''' Method to create Kafka topics defined as class members - """ - all_topics = [member.value for member in KafkaTopic] - LOGGER.debug("Kafka server address is: {:} ".format(KafkaConfig.get_kafka_address())) - if( KafkaTopic.create_new_topic_if_not_exists( all_topics )): - LOGGER.debug("All topics are created sucsessfully or Already Exists") + ''' + LOGGER.debug('Kafka server address: {:s}'.format(str(KafkaConfig.get_kafka_address()))) + kafka_admin_client = KafkaConfig.get_admin_client() + + topic_metadata = kafka_admin_client.list_topics(timeout=KAFKA_TOPIC_LIST_TIMEOUT) + existing_topics = set(topic_metadata.topics.keys()) + LOGGER.debug('Existing Kafka topics: {:s}'.format(str(existing_topics))) + + missing_topics = [ + NewTopic(topic.value, KAFKA_TOPIC_NUM_PARTITIONS, KAFKA_TOPIC_REPLICATION_FACTOR) + for topic in KafkaTopic + if topic.value not in existing_topics + ] + LOGGER.debug('Missing Kafka topics: {:s}'.format(str(missing_topics))) + + if len(missing_topics) == 0: + LOGGER.debug('All topics already existed.') return True - else: - LOGGER.debug("Error creating all topics") - return False - - @staticmethod - def create_new_topic_if_not_exists(new_topics: list) -> bool: - """ - Method to create Kafka topic if it does not exist. 
- Args: - list of topic: containing the topic name(s) to be created on Kafka - """ - LOGGER.debug("Topics names to be verified and created: {:}".format(new_topics)) - for topic in new_topics: + + create_topic_future_map = kafka_admin_client.create_topics(missing_topics) + LOGGER.debug('create_topic_future_map: {:s}'.format(str(create_topic_future_map))) + failed_topic_creations = set() + for topic, future in create_topic_future_map.items(): try: - topic_metadata = KafkaConfig.get_admin_client().list_topics(timeout=5) - # LOGGER.debug("Existing topic list: {:}".format(topic_metadata.topics)) - if topic not in topic_metadata.topics: - # If the topic does not exist, create a new topic - print("Topic {:} does not exist. Creating...".format(topic)) - LOGGER.debug("Topic {:} does not exist. Creating...".format(topic)) - new_topic = NewTopic(topic, num_partitions=1, replication_factor=1) - KafkaConfig.get_admin_client().create_topics([new_topic]) - else: - print("Topic name already exists: {:}".format(topic)) - LOGGER.debug("Topic name already exists: {:}".format(topic)) - except Exception as e: - LOGGER.debug("Failed to create topic: {:}".format(e)) - return False - return True + LOGGER.info('Waiting for Topic({:s})...'.format(str(topic))) + future.result() # Blocks until topic is created or raises an exception + LOGGER.info('Topic({:s}) successfully created.'.format(str(topic))) + except: # pylint: disable=bare-except + LOGGER.exception('Failed to create Topic({:s})'.format(str(topic))) + failed_topic_creations.add(topic) + + if len(failed_topic_creations) > 0: return False + + LOGGER.debug('All topics created.') + + # Wait until topics appear in metadata + desired_topics = {topic.value for topic in KafkaTopic} + missing_topics = set() + for _ in range(TOPIC_CREATE_WAIT_ITERATIONS): + topic_metadata = kafka_admin_client.list_topics(timeout=KAFKA_TOPIC_LIST_TIMEOUT) + existing_topics = set(topic_metadata.topics.keys()) + missing_topics = desired_topics.difference(existing_topics) + if len(missing_topics) == 0: break + MSG = 'Waiting for Topics({:s}) to appear in metadata...' + LOGGER.debug(MSG.format(str(missing_topics))) + time.sleep(TOPIC_CREATE_WAIT_TIME) + + if len(missing_topics) > 0: + MSG = 'Something went wrong... 
Topics({:s}) do not appear in metadata'
+            LOGGER.error(MSG.format(str(missing_topics)))
+            return False
+        else:
+            LOGGER.debug('All topics created and available.')
+            return True
 
 # TODO: create all topics after the deployments (Telemetry and Analytics)
+
+if __name__ == '__main__':
+    import os
+    if 'KFK_SERVER_ADDRESS' not in os.environ:
+        os.environ['KFK_SERVER_ADDRESS'] = 'kafka-service.kafka.svc.cluster.local:9092'
+    KafkaTopic.create_all_topics()
diff --git a/src/common/tools/object_factory/Link.py b/src/common/tools/object_factory/Link.py
index 358a30b96c5080b74237526238df43ea2675c551..35cef889af74c9698ea9d285645095f5241207fe 100644
--- a/src/common/tools/object_factory/Link.py
+++ b/src/common/tools/object_factory/Link.py
@@ -15,6 +15,8 @@
 import copy
 from typing import Dict, List, Optional, Tuple
 
+from common.proto.context_pb2 import LinkTypeEnum
+
 def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str:
     return '{:s}/{:s}=={:s}/{:s}'.format(
         a_endpoint_id['device_id']['device_uuid']['uuid'], a_endpoint_id['endpoint_uuid']['uuid'],
@@ -25,9 +27,13 @@ def json_link_id(link_uuid : str) -> Dict:
 
 def json_link(
     link_uuid : str, endpoint_ids : List[Dict], name : Optional[str] = None,
+    link_type : LinkTypeEnum = LinkTypeEnum.LINKTYPE_UNKNOWN,
     total_capacity_gbps : Optional[float] = None, used_capacity_gbps : Optional[float] = None
 ) -> Dict:
-    result = {'link_id': json_link_id(link_uuid), 'link_endpoint_ids': copy.deepcopy(endpoint_ids)}
+    result = {
+        'link_id': json_link_id(link_uuid), 'link_type': link_type,
+        'link_endpoint_ids': copy.deepcopy(endpoint_ids),
+    }
     if name is not None: result['name'] = name
     if total_capacity_gbps is not None:
         attributes : Dict = result.setdefault('attributes', dict())
diff --git a/src/common/tools/object_factory/Service.py b/src/common/tools/object_factory/Service.py
index ab399adbe734adeb55f60c804aea0e4877072316..74c18323015f1035be1effbe27f8d48a2a5d6d1f 100644
--- a/src/common/tools/object_factory/Service.py
+++ b/src/common/tools/object_factory/Service.py
@@ -30,10 +30,10 @@ def json_service_id(service_uuid : str, context_id : Optional[Dict] = None):
 
 def json_service(
     service_uuid : str, service_type : ServiceTypeEnum, context_id : Optional[Dict] = None,
-    status : ServiceStatusEnum = ServiceStatusEnum.SERVICESTATUS_PLANNED,
-    endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], config_rules : List[Dict] = []):
-
-    return {
+    name : Optional[str] = None, status : ServiceStatusEnum = ServiceStatusEnum.SERVICESTATUS_PLANNED,
+    endpoint_ids : List[Dict] = [], constraints : List[Dict] = [], config_rules : List[Dict] = []
+) -> Dict:
+    result = {
         'service_id'          : json_service_id(service_uuid, context_id=context_id),
         'service_type'        : service_type,
         'service_status'      : {'service_status': status},
@@ -41,6 +41,8 @@ def json_service(
         'service_constraints' : copy.deepcopy(constraints),
         'service_config'      : {'config_rules': copy.deepcopy(config_rules)},
     }
+    if name is not None: result['name'] = name
+    return result
 
 def json_service_qkd_planned(
     service_uuid : str, endpoint_ids : List[Dict] = [], constraints : List[Dict] = [],
diff --git a/src/context/service/database/Link.py b/src/context/service/database/Link.py
index 6244a8517f66280893acb10944a235666beb80d4..5782667be8cc11c4dffbf68a8a75d297077d39c1 100644
--- a/src/context/service/database/Link.py
+++ b/src/context/service/database/Link.py
@@ -18,7 +18,9 @@
 from sqlalchemy.engine import Engine
 from sqlalchemy.orm import Session, selectinload, sessionmaker
 from sqlalchemy_cockroachdb import 
run_transaction from typing import Dict, List, Optional, Set, Tuple -from common.proto.context_pb2 import Empty, EventTypeEnum, Link, LinkId, LinkIdList, LinkList, TopologyId +from common.proto.context_pb2 import ( + Empty, EventTypeEnum, Link, LinkId, LinkIdList, LinkList, TopologyId +) from common.message_broker.MessageBroker import MessageBroker from common.method_wrappers.ServiceExceptions import NotFoundException from common.tools.object_factory.Link import json_link_id diff --git a/src/context/service/database/OpticalLink.py b/src/context/service/database/OpticalLink.py index 1f45daf43bb805e750b1742e6e4ea7e18039cc43..785ce8098ff043c95b5803ee60605c2e7305da02 100644 --- a/src/context/service/database/OpticalLink.py +++ b/src/context/service/database/OpticalLink.py @@ -66,9 +66,10 @@ def optical_link_set(db_engine : Engine, messagebroker : MessageBroker, request now = datetime.datetime.now(datetime.timezone.utc) - # By default, always add link to default Context/Topology topology_uuids : Set[str] = set() related_topologies : List[Dict] = list() + + # By default, always add link to default Context/Topology _,topology_uuid = topology_get_uuid(TopologyId(), allow_random=False, allow_default=True) related_topologies.append({ 'topology_uuid': topology_uuid, @@ -77,15 +78,14 @@ def optical_link_set(db_engine : Engine, messagebroker : MessageBroker, request topology_uuids.add(topology_uuid) link_endpoints_data : List[Dict] = list() - for i,endpoint_id in enumerate(request.link_endpoint_ids): - endpoint_topology_uuid, endpoint_device_uuid, endpoint_uuid = endpoint_get_uuid( - endpoint_id, endpoint_name="", allow_random=True) + endpoint_topology_uuid, _, endpoint_uuid = endpoint_get_uuid( + endpoint_id, allow_random=False) link_endpoints_data.append({ 'link_uuid' : link_uuid, 'endpoint_uuid': endpoint_uuid, - + 'position' : i, }) if endpoint_topology_uuid not in topology_uuids: diff --git a/src/context/service/database/models/OpticalLinkModel.py b/src/context/service/database/models/OpticalLinkModel.py index 930e8935c5af48793915c72df451ec48dc782a04..d43fee01811c1a754451d97efb389ce464bd9143 100644 --- a/src/context/service/database/models/OpticalLinkModel.py +++ b/src/context/service/database/models/OpticalLinkModel.py @@ -12,13 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
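
optical_link_set above now persists the request-order index of every endpoint in a position column, and the model change below sorts on it when dumping, so the A→Z endpoint order given in the request survives the database round-trip. The sorting idiom in isolation (stand-in objects, not the real ORM models):

    import operator

    class FakeEndpointRow:
        def __init__(self, uuid : str, position : int) -> None:
            self.uuid, self.position = uuid, position

    # Rows can come back from the database in arbitrary order...
    rows = [FakeEndpointRow('ep-z', 1), FakeEndpointRow('ep-a', 0)]

    # ...sorting on the persisted position restores the original request order.
    ordered = sorted(rows, key=operator.attrgetter('position'))
    assert [row.uuid for row in ordered] == ['ep-a', 'ep-z']
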
- -from sqlalchemy import Column, DateTime, ForeignKey, Integer, String ,Boolean +import operator +from sqlalchemy import ( + Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, String +) from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship from typing import Dict from ._Base import _Base -from .Slot import C_Slot ,S_Slot , L_Slot +from .Slot import C_Slot, S_Slot, L_Slot class OpticalLinkModel(_Base): __tablename__ = 'opticallink' @@ -59,7 +61,7 @@ class OpticalLinkModel(_Base): }, 'link_endpoint_ids' : [ optical_endpoint.endpoint.dump_id() - for optical_endpoint in self.opticallink_endpoints + for optical_endpoint in sorted(self.opticallink_endpoints, key=operator.attrgetter('position')) ], } return result @@ -69,6 +71,11 @@ class OpticalLinkEndPointModel(_Base): link_uuid = Column(ForeignKey('opticallink.opticallink_uuid', ondelete='CASCADE' ), primary_key=True) endpoint_uuid = Column(ForeignKey('endpoint.endpoint_uuid', ondelete='RESTRICT'), primary_key=True, index=True) + position = Column(Integer, nullable=False) optical_link = relationship('OpticalLinkModel', back_populates='opticallink_endpoints') endpoint = relationship('EndPointModel', lazy='selectin') + + __table_args__ = ( + CheckConstraint(position >= 0, name='check_position_value'), + ) diff --git a/src/device/service/drivers/__init__.py b/src/device/service/drivers/__init__.py index e3102cdf523a4e0b551873bb8f0c423db00aebf0..2169881af366e0e05e670ec5e2a1552c5dd67f93 100644 --- a/src/device/service/drivers/__init__.py +++ b/src/device/service/drivers/__init__.py @@ -82,7 +82,7 @@ DRIVERS.append( ])) -from .ietf_l3vpn.driver import IetfL3VpnDriver # pylint: disable=wrong-import-position +from .ietf_l3vpn.IetfL3VpnDriver import IetfL3VpnDriver # pylint: disable=wrong-import-position DRIVERS.append( (IetfL3VpnDriver, [ { @@ -188,7 +188,10 @@ if LOAD_ALL_DEVICE_DRIVERS: DRIVERS.append( (OpticalTfsDriver, [ { - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.OPEN_LINE_SYSTEM, + FilterFieldEnum.DEVICE_TYPE: [ + DeviceTypeEnum.OPEN_LINE_SYSTEM, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, + ], FilterFieldEnum.DRIVER: DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS, } ])) diff --git a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py index d756ca89f0bb062137a86ed2a21dfafec82b4bca..ba3cef3d8b85997eee395bc4a3c2c5f3782b0711 100644 --- a/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py +++ b/src/device/service/drivers/ietf_l2vpn/IetfL2VpnDriver.py @@ -70,9 +70,10 @@ class IetfL2VpnDriver(_Driver): def Connect(self) -> bool: with self.__lock: + if self.__started.is_set(): return True try: self.wim.check_credentials() - except Exception: # pylint: disable=broad-except + except: # pylint: disable=bare-except LOGGER.exception('Exception checking credentials') return False else: diff --git a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py index c51e2d6bf165d1b9caf41b05e367e43eb3ffebb5..e9b21f2498ba37abd8d45f278724c7afc82b4573 100644 --- a/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l2vpn/TfsApiClient.py @@ -13,20 +13,13 @@ # limitations under the License. 
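
The Connect() change above makes reconnection idempotent: once the started event is set, repeated calls return immediately instead of re-checking credentials. The pattern in isolation (class and method names are illustrative):

    import threading

    class ConnectOnce:
        def __init__(self) -> None:
            self.__lock = threading.Lock()
            self.__started = threading.Event()

        def connect(self) -> bool:
            with self.__lock:
                # Second and later calls short-circuit here.
                if self.__started.is_set(): return True
                # A real driver would verify credentials here before marking itself started.
                self.__started.set()
                return True
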
import logging, requests -from requests.auth import HTTPBasicAuth from typing import Dict, List, Optional +from common.tools.client.RestClient import RestClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum -GET_DEVICES_URL = '{:s}://{:s}:{:d}/tfs-api/devices' -GET_LINKS_URL = '{:s}://{:s}:{:d}/tfs-api/links' -TIMEOUT = 30 - -HTTP_OK_CODES = { - 200, # OK - 201, # Created - 202, # Accepted - 204, # No Content -} +GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' +GET_DEVICES_URL = '/tfs-api/devices' +GET_LINKS_URL = '/tfs-api/links' MAPPING_STATUS = { 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, @@ -47,36 +40,44 @@ MAPPING_DRIVER = { 'DEVICEDRIVER_OPTICAL_TFS' : 9, 'DEVICEDRIVER_IETF_ACTN' : 10, 'DEVICEDRIVER_OC' : 11, + 'DEVICEDRIVER_QKD' : 12, + 'DEVICEDRIVER_IETF_L3VPN' : 13, + 'DEVICEDRIVER_IETF_SLICE' : 14, + 'DEVICEDRIVER_NCE' : 15, } -MSG_ERROR = 'Could not retrieve devices in remote TeraFlowSDN instance({:s}). status_code={:s} reply={:s}' - LOGGER = logging.getLogger(__name__) -class TfsApiClient: +class TfsApiClient(RestClient): def __init__( self, address : str, port : int, scheme : str = 'http', - username : Optional[str] = None, password : Optional[str] = None + username : Optional[str] = None, password : Optional[str] = None, + timeout : Optional[int] = 30 ) -> None: - self._devices_url = GET_DEVICES_URL.format(scheme, address, port) - self._links_url = GET_LINKS_URL.format(scheme, address, port) - self._auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None - - def get_devices_endpoints(self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES) -> List[Dict]: + super().__init__( + address, port, scheme=scheme, username=username, password=password, + timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER + ) + + def check_credentials(self) -> None: + self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) + LOGGER.info('Credentials checked') + + def get_devices_endpoints( + self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES + ) -> List[Dict]: LOGGER.debug('[get_devices_endpoints] begin') - LOGGER.debug('[get_devices_endpoints] import_topology={:s}'.format(str(import_topology))) - - reply = requests.get(self._devices_url, timeout=TIMEOUT, verify=False, auth=self._auth) - if reply.status_code not in HTTP_OK_CODES: - msg = MSG_ERROR.format(str(self._devices_url), str(reply.status_code), str(reply)) - LOGGER.error(msg) - raise Exception(msg) + MSG = '[get_devices_endpoints] import_topology={:s}' + LOGGER.debug(MSG.format(str(import_topology))) if import_topology == ImportTopologyEnum.DISABLED: - raise Exception('Unsupported import_topology mode: {:s}'.format(str(import_topology))) + MSG = 'Unsupported import_topology mode: {:s}' + raise Exception(MSG.format(str(import_topology))) + + devices = self.get(GET_DEVICES_URL, expected_status_codes={requests.codes['OK']}) result = list() - for json_device in reply.json()['devices']: + for json_device in devices['devices']: device_uuid : str = json_device['device_id']['device_uuid']['uuid'] device_type : str = json_device['device_type'] #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type @@ -87,7 +88,10 @@ class TfsApiClient: 'name': json_device['name'], 'type': device_type, 'status': MAPPING_STATUS[device_status], - 'drivers': [MAPPING_DRIVER[driver] for driver in json_device['device_drivers']], + 'drivers': [ + MAPPING_DRIVER[driver] + for driver in json_device['device_drivers'] 
+ ], } result.append((device_url, device_data)) @@ -106,17 +110,16 @@ class TfsApiClient: LOGGER.debug('[get_devices_endpoints] devices only; returning') return result - reply = requests.get(self._links_url, timeout=TIMEOUT, verify=False, auth=self._auth) - if reply.status_code not in HTTP_OK_CODES: - msg = MSG_ERROR.format(str(self._links_url), str(reply.status_code), str(reply)) - LOGGER.error(msg) - raise Exception(msg) + links = self.get(GET_LINKS_URL, expected_status_codes={requests.codes['OK']}) - for json_link in reply.json()['links']: + for json_link in links['links']: link_uuid : str = json_link['link_id']['link_uuid']['uuid'] link_url = '/links/link[{:s}]'.format(link_uuid) link_endpoint_ids = [ - (json_endpoint_id['device_id']['device_uuid']['uuid'], json_endpoint_id['endpoint_uuid']['uuid']) + ( + json_endpoint_id['device_id']['device_uuid']['uuid'], + json_endpoint_id['endpoint_uuid']['uuid'], + ) for json_endpoint_id in json_link['link_endpoint_ids'] ] link_data = { diff --git a/src/device/service/drivers/ietf_l3vpn/driver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py similarity index 67% rename from src/device/service/drivers/ietf_l3vpn/driver.py rename to src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 2aca83b6a645bf2e793b08841949813f0413a531..7a7e336489affbe0f522328f22ec7c8e6461cb16 100644 --- a/src/device/service/drivers/ietf_l3vpn/driver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -1,4 +1,4 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,41 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. 
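
With TfsApiClient now subclassing the common RestClient, credential checking and topology retrieval reduce to two calls. A hedged usage sketch (the address and credentials are placeholders):

    from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum
    from device.service.drivers.ietf_l2vpn.TfsApiClient import TfsApiClient

    tac = TfsApiClient('10.0.0.10', 80, scheme='http', username='admin', password='admin')
    tac.check_credentials()  # GET /tfs-api/context_ids; raises unless HTTP 200

    # List of (resource_url, resource_data) tuples for devices and their endpoints;
    # other ImportTopologyEnum modes also pull the links between them.
    resources = tac.get_devices_endpoints(ImportTopologyEnum.DEVICES)
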
-import json -import logging -import re -import threading -from typing import Any, Iterator, List, Optional, Tuple, Union - -import anytree -import requests -from requests.auth import HTTPBasicAuth +import anytree, json, logging, re, requests, threading +from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_length, chk_string, chk_type -from device.service.driver_api._Driver import ( - RESOURCE_ENDPOINTS, - RESOURCE_SERVICES, - _Driver, -) -from device.service.driver_api.AnyTreeTools import ( - TreeNode, - dump_subtree, - get_subnode, - set_subnode_value, -) -from device.service.driver_api.ImportTopologyEnum import ( - ImportTopologyEnum, - get_import_topology, -) - +from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES +from device.service.driver_api.AnyTreeTools import TreeNode, dump_subtree, get_subnode, set_subnode_value +from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology from .Constants import SPECIAL_RESOURCE_MAPPINGS from .TfsApiClient import TfsApiClient from .Tools import compose_resource_endpoint LOGGER = logging.getLogger(__name__) - ALL_RESOURCE_KEYS = [ RESOURCE_ENDPOINTS, RESOURCE_SERVICES, @@ -57,40 +36,34 @@ RE_GET_ENDPOINT_FROM_INTERFACE = re.compile(r"^\/interface\[([^\]]+)\].*") RE_IETF_L3VPN_DATA = re.compile(r"^\/service\[[^\]]+\]\/IETFL3VPN$") RE_IETF_L3VPN_OPERATION = re.compile(r"^\/service\[[^\]]+\]\/IETFL3VPN\/operation$") -DRIVER_NAME = "ietf_l3vpn" -METRICS_POOL = MetricsPool("Device", "Driver", labels={"driver": DRIVER_NAME}) - +DRIVER_NAME = 'ietf_l3vpn' +METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) class IetfL3VpnDriver(_Driver): - def __init__(self, address: str, port: str, **settings) -> None: + def __init__(self, address : str, port : str, **settings) -> None: super().__init__(DRIVER_NAME, address, int(port), **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - self.__running = TreeNode(".") - scheme = self.settings.get("scheme", "http") - username = self.settings.get("username") - password = self.settings.get("password") + self.__running = TreeNode('.') + username = self.settings.get('username') + password = self.settings.get('password') + scheme = self.settings.get('scheme', 'http') + timeout = int(self.settings.get('timeout', 60)) self.tac = TfsApiClient( - self.address, - self.port, - scheme=scheme, - username=username, - password=password, - ) - self.__auth = None - # ( - # HTTPBasicAuth(username, password) - # if username is not None and password is not None - # else None - # ) - self.__tfs_nbi_root = "{:s}://{:s}:{:d}".format( - scheme, self.address, int(self.port) - ) - self.__timeout = int(self.settings.get("timeout", 120)) - self.__import_topology = get_import_topology( - self.settings, default=ImportTopologyEnum.DEVICES + self.address, self.port, scheme=scheme, username=username, + password=password, timeout=timeout ) + #self.__tfs_nbi_root = "{:s}://{:s}:{:d}".format(scheme, self.address, int(self.port)) + + # Options are: + # disabled --> just import endpoints as usual + # devices --> imports sub-devices but not links connecting them. + # (a remotely-controlled transport domain might exist between them) + # topology --> imports sub-devices and links connecting them. 
+ # (not supported by XR driver) + self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.DEVICES) + endpoints = self.settings.get("endpoints", []) endpoint_resources = [] for endpoint in endpoints: @@ -139,20 +112,12 @@ class IetfL3VpnDriver(_Driver): return results def Connect(self) -> bool: - url = ( - self.__tfs_nbi_root + "/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services" - ) with self.__lock: - if self.__started.is_set(): - return True + if self.__started.is_set(): return True try: - # requests.get(url, timeout=self.__timeout, auth=self.__auth) - ... - except requests.exceptions.Timeout: - LOGGER.exception("Timeout connecting {:s}".format(url)) - return False - except Exception: # pylint: disable=broad-except - LOGGER.exception("Exception connecting {:s}".format(url)) + self.tac.check_credentials() + except: # pylint: disable=bare-except + LOGGER.exception('Exception checking credentials') return False else: self.__started.set() @@ -170,50 +135,46 @@ class IetfL3VpnDriver(_Driver): @metered_subclass_method(METRICS_POOL) def GetConfig( - self, resource_keys: List[str] = [] + self, resource_keys : List[str] = [] ) -> List[Tuple[str, Union[Any, None, Exception]]]: - chk_type("resources", resource_keys, list) + chk_type('resources', resource_keys, list) + results = [] with self.__lock: - if len(resource_keys) == 0: - return dump_subtree(self.__running) - results = [] - resolver = anytree.Resolver(pathattr="name") + self.tac.check_credentials() + if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS + #if len(resource_keys) == 0: + # return dump_subtree(self.__running) + resolver = anytree.Resolver(pathattr='name') for i, resource_key in enumerate(resource_keys): - str_resource_name = "resource_key[#{:d}]".format(i) + str_resource_name = 'resource_key[#{:d}]'.format(i) try: chk_string(str_resource_name, resource_key, allow_empty=False) - resource_key = SPECIAL_RESOURCE_MAPPINGS.get( - resource_key, resource_key - ) - resource_path = resource_key.split("/") - except Exception as e: # pylint: disable=broad-except - LOGGER.exception( - "Exception validating {:s}: {:s}".format( - str_resource_name, str(resource_key) + if resource_key == RESOURCE_ENDPOINTS: + # return endpoints through TFS NBI API and list-devices method + results.extend(self.tac.get_devices_endpoints(self.__import_topology)) + else: + resource_key = SPECIAL_RESOURCE_MAPPINGS.get( + resource_key, resource_key ) - ) - results.append( - (resource_key, e) - ) # if validation fails, store the exception - continue - - resource_node = get_subnode( - resolver, self.__running, resource_path, default=None - ) - # if not found, resource_node is None - if resource_node is None: - continue - results.extend(dump_subtree(resource_node)) - return results + resource_path = resource_key.split('/') + resource_node = get_subnode( + resolver, self.__running, resource_path, default=None + ) + # if not found, resource_node is None + if resource_node is None: continue + results.extend(dump_subtree(resource_node)) + except Exception as e: + MSG = 'Unhandled error processing {:s}: resource_key({:s})' + LOGGER.exception(MSG.format(str_resource_name, str(resource_key))) + results.append((resource_key, e)) return results @metered_subclass_method(METRICS_POOL) def SetConfig( - self, resources: List[Tuple[str, Any]] + self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: results = [] - if len(resources) == 0: - return results + if len(resources) == 0: return results with self.__lock: for 
resource in resources: resource_key, resource_value = resource @@ -224,7 +185,7 @@ class IetfL3VpnDriver(_Driver): else: raise Exception("operation type not found in resources") for resource in resources: - LOGGER.info("resource = {:s}".format(str(resource))) + LOGGER.info('resource = {:s}'.format(str(resource))) resource_key, resource_value = resource if not RE_IETF_L3VPN_DATA.match(resource_key): continue @@ -261,7 +222,7 @@ class IetfL3VpnDriver(_Driver): @metered_subclass_method(METRICS_POOL) def DeleteConfig( - self, resources: List[Tuple[str, Any]] + self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: results = [] if len(resources) == 0: @@ -290,20 +251,20 @@ class IetfL3VpnDriver(_Driver): @metered_subclass_method(METRICS_POOL) def SubscribeState( - self, subscriptions: List[Tuple[str, float, float]] + self, subscriptions : List[Tuple[str, float, float]] ) -> List[Union[bool, Exception]]: - # TODO: IETF L3VPN does not support monitoring by now + # TODO: does not support monitoring by now return [False for _ in subscriptions] @metered_subclass_method(METRICS_POOL) def UnsubscribeState( - self, subscriptions: List[Tuple[str, float, float]] + self, subscriptions : List[Tuple[str, float, float]] ) -> List[Union[bool, Exception]]: - # TODO: IETF L3VPN does not support monitoring by now + # TODO: does not support monitoring by now return [False for _ in subscriptions] def GetState( - self, blocking=False, terminate: Optional[threading.Event] = None + self, blocking=False, terminate : Optional[threading.Event] = None ) -> Iterator[Tuple[float, str, Any]]: - # TODO: IETF L3VPN does not support monitoring by now + # TODO: does not support monitoring by now return [] diff --git a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py index 1ca965f8777aa23287ad379c8ac2cd0d92d9c28f..6efe3712f327af35a114434e09efa5d4996ee848 100644 --- a/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py +++ b/src/device/service/drivers/ietf_l3vpn/TfsApiClient.py @@ -1,4 +1,4 @@ -# Copyright 2022-2024 ETSI OSG/SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,176 +12,152 @@ # See the License for the specific language governing permissions and # limitations under the License. 
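
After this refactor, GetConfig with an empty resource_keys list expands to ALL_RESOURCE_KEYS, and RESOURCE_ENDPOINTS is answered straight from the remote TFS NBI instead of the local resolver tree. A usage sketch (the address and settings values are placeholders):

    from device.service.driver_api._Driver import RESOURCE_ENDPOINTS
    from device.service.drivers.ietf_l3vpn.IetfL3VpnDriver import IetfL3VpnDriver

    driver = IetfL3VpnDriver(
        '10.0.0.10', '80', scheme='http', username='admin', password='admin'
    )
    assert driver.Connect()  # now delegates to TfsApiClient.check_credentials()

    endpoints  = driver.GetConfig([RESOURCE_ENDPOINTS])  # fetched via the TFS NBI
    everything = driver.GetConfig()                      # [] expands to ALL_RESOURCE_KEYS
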
-import logging +import logging, requests from typing import Dict, List, Optional - -import requests -from requests.auth import HTTPBasicAuth - +from common.tools.client.RestClient import RestClient from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum -GET_DEVICES_URL = "{:s}://{:s}:{:d}/tfs-api/devices" -GET_LINKS_URL = "{:s}://{:s}:{:d}/tfs-api/links" -L3VPN_URL = "{:s}://{:s}:{:d}/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services" -TIMEOUT = 30 - -HTTP_OK_CODES = { - 200, # OK - 201, # Created - 202, # Accepted - 204, # No Content -} +GET_CONTEXT_IDS_URL = '/tfs-api/context_ids' +GET_DEVICES_URL = '/tfs-api/devices' +GET_LINKS_URL = '/tfs-api/links' +L3VPN_URL = '/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services' MAPPING_STATUS = { - "DEVICEOPERATIONALSTATUS_UNDEFINED": 0, - "DEVICEOPERATIONALSTATUS_DISABLED": 1, - "DEVICEOPERATIONALSTATUS_ENABLED": 2, + 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, + 'DEVICEOPERATIONALSTATUS_DISABLED' : 1, + 'DEVICEOPERATIONALSTATUS_ENABLED' : 2, } MAPPING_DRIVER = { - "DEVICEDRIVER_UNDEFINED": 0, - "DEVICEDRIVER_OPENCONFIG": 1, - "DEVICEDRIVER_TRANSPORT_API": 2, - "DEVICEDRIVER_P4": 3, - "DEVICEDRIVER_IETF_NETWORK_TOPOLOGY": 4, - "DEVICEDRIVER_ONF_TR_532": 5, - "DEVICEDRIVER_XR": 6, - "DEVICEDRIVER_IETF_L2VPN": 7, - "DEVICEDRIVER_GNMI_OPENCONFIG": 8, - "DEVICEDRIVER_OPTICAL_TFS": 9, - "DEVICEDRIVER_IETF_ACTN": 10, - "DEVICEDRIVER_OC": 11, + 'DEVICEDRIVER_UNDEFINED' : 0, + 'DEVICEDRIVER_OPENCONFIG' : 1, + 'DEVICEDRIVER_TRANSPORT_API' : 2, + 'DEVICEDRIVER_P4' : 3, + 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, + 'DEVICEDRIVER_ONF_TR_532' : 5, + 'DEVICEDRIVER_XR' : 6, + 'DEVICEDRIVER_IETF_L2VPN' : 7, + 'DEVICEDRIVER_GNMI_OPENCONFIG' : 8, + 'DEVICEDRIVER_OPTICAL_TFS' : 9, + 'DEVICEDRIVER_IETF_ACTN' : 10, + 'DEVICEDRIVER_OC' : 11, + 'DEVICEDRIVER_QKD' : 12, + 'DEVICEDRIVER_IETF_L3VPN' : 13, + 'DEVICEDRIVER_IETF_SLICE' : 14, + 'DEVICEDRIVER_NCE' : 15, } -MSG_ERROR = "Could not retrieve devices in remote TeraFlowSDN instance({:s}). 
status_code={:s} reply={:s}" - LOGGER = logging.getLogger(__name__) - -class TfsApiClient: +class TfsApiClient(RestClient): def __init__( - self, - address: str, - port: int, - scheme: str = "http", - username: Optional[str] = None, - password: Optional[str] = None, + self, address : str, port : int, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : Optional[int] = 30 ) -> None: - self._devices_url = GET_DEVICES_URL.format(scheme, address, port) - self._links_url = GET_LINKS_URL.format(scheme, address, port) - self._l3vpn_url = L3VPN_URL.format(scheme, address, port) - self._auth = None - # ( - # HTTPBasicAuth(username, password) - # if username is not None and password is not None - # else None - # ) + super().__init__( + address, port, scheme=scheme, username=username, password=password, + timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER + ) + + def check_credentials(self) -> None: + self.get(GET_CONTEXT_IDS_URL, expected_status_codes={requests.codes['OK']}) + LOGGER.info('Credentials checked') def get_devices_endpoints( - self, import_topology: ImportTopologyEnum = ImportTopologyEnum.DEVICES + self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES ) -> List[Dict]: - LOGGER.debug("[get_devices_endpoints] begin") - LOGGER.debug( - "[get_devices_endpoints] import_topology={:s}".format(str(import_topology)) - ) - - reply = requests.get(self._devices_url, timeout=TIMEOUT, auth=self._auth) - if reply.status_code not in HTTP_OK_CODES: - msg = MSG_ERROR.format( - str(self._devices_url), str(reply.status_code), str(reply) - ) - LOGGER.error(msg) - raise Exception(msg) + LOGGER.debug('[get_devices_endpoints] begin') + MSG = '[get_devices_endpoints] import_topology={:s}' + LOGGER.debug(MSG.format(str(import_topology))) if import_topology == ImportTopologyEnum.DISABLED: - raise Exception( - "Unsupported import_topology mode: {:s}".format(str(import_topology)) - ) + MSG = 'Unsupported import_topology mode: {:s}' + raise Exception(MSG.format(str(import_topology))) + + devices = self.get(GET_DEVICES_URL, expected_status_codes={requests.codes['OK']}) result = list() - for json_device in reply.json()["devices"]: - device_uuid: str = json_device["device_id"]["device_uuid"]["uuid"] - device_type: str = json_device["device_type"] - device_status = json_device["device_operational_status"] - device_url = "/devices/device[{:s}]".format(device_uuid) + for json_device in devices['devices']: + device_uuid : str = json_device['device_id']['device_uuid']['uuid'] + device_type : str = json_device['device_type'] + #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type + device_status = json_device['device_operational_status'] + device_url = '/devices/device[{:s}]'.format(device_uuid) device_data = { - "uuid": json_device["device_id"]["device_uuid"]["uuid"], - "name": json_device["name"], - "type": device_type, - "status": MAPPING_STATUS[device_status], - "drivers": [ - MAPPING_DRIVER[driver] for driver in json_device["device_drivers"] + 'uuid': json_device['device_id']['device_uuid']['uuid'], + 'name': json_device['name'], + 'type': device_type, + 'status': MAPPING_STATUS[device_status], + 'drivers': [ + MAPPING_DRIVER[driver] + for driver in json_device['device_drivers'] ], } result.append((device_url, device_data)) - for json_endpoint in json_device["device_endpoints"]: - endpoint_uuid = json_endpoint["endpoint_id"]["endpoint_uuid"]["uuid"] - endpoint_url = 
"/endpoints/endpoint[{:s}]".format(endpoint_uuid) + for json_endpoint in json_device['device_endpoints']: + endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) endpoint_data = { - "device_uuid": device_uuid, - "uuid": endpoint_uuid, - "name": json_endpoint["name"], - "type": json_endpoint["endpoint_type"], + 'device_uuid': device_uuid, + 'uuid': endpoint_uuid, + 'name': json_endpoint['name'], + 'type': json_endpoint['endpoint_type'], } result.append((endpoint_url, endpoint_data)) if import_topology == ImportTopologyEnum.DEVICES: - LOGGER.debug("[get_devices_endpoints] devices only; returning") + LOGGER.debug('[get_devices_endpoints] devices only; returning') return result - reply = requests.get(self._links_url, timeout=TIMEOUT, auth=self._auth) - if reply.status_code not in HTTP_OK_CODES: - msg = MSG_ERROR.format( - str(self._links_url), str(reply.status_code), str(reply) - ) - LOGGER.error(msg) - raise Exception(msg) - - for json_link in reply.json()["links"]: - link_uuid: str = json_link["link_id"]["link_uuid"]["uuid"] - link_url = "/links/link[{:s}]".format(link_uuid) + links = self.get(GET_LINKS_URL, expected_status_codes={requests.codes['OK']}) + + for json_link in links['links']: + link_uuid : str = json_link['link_id']['link_uuid']['uuid'] + link_url = '/links/link[{:s}]'.format(link_uuid) link_endpoint_ids = [ ( - json_endpoint_id["device_id"]["device_uuid"]["uuid"], - json_endpoint_id["endpoint_uuid"]["uuid"], + json_endpoint_id['device_id']['device_uuid']['uuid'], + json_endpoint_id['endpoint_uuid']['uuid'], ) - for json_endpoint_id in json_link["link_endpoint_ids"] + for json_endpoint_id in json_link['link_endpoint_ids'] ] link_data = { - "uuid": json_link["link_id"]["link_uuid"]["uuid"], - "name": json_link["name"], - "endpoints": link_endpoint_ids, + 'uuid': json_link['link_id']['link_uuid']['uuid'], + 'name': json_link['name'], + 'endpoints': link_endpoint_ids, } result.append((link_url, link_data)) - LOGGER.debug("[get_devices_endpoints] topology; returning") + LOGGER.debug('[get_devices_endpoints] topology; returning') return result - def create_connectivity_service(self, l3vpn_data: dict) -> None: + def create_connectivity_service(self, l3vpn_data : dict) -> None: + MSG = '[create_connectivity_service] l3vpn_data={:s}' + LOGGER.debug(MSG.format(str(l3vpn_data))) try: - requests.post(self._l3vpn_url, json=l3vpn_data) - LOGGER.debug( - "[create_connectivity_service] l3vpn_data={:s}".format(str(l3vpn_data)) - ) - except requests.exceptions.ConnectionError: - raise Exception("faild to send post request to TFS L3VPN NBI") - - def update_connectivity_service(self, l3vpn_data: dict) -> None: - vpn_id = l3vpn_data['ietf-l3vpn-svc:l3vpn-svc']["vpn-services"]["vpn-service"][0]["vpn-id"] - url = self._l3vpn_url + f"/vpn-service={vpn_id}" + self.post(L3VPN_URL, body=l3vpn_data) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send POST request to TFS L3VPN NBI' + raise Exception(MSG) from e + + def update_connectivity_service(self, l3vpn_data : dict) -> None: + MSG = '[update_connectivity_service] l3vpn_data={:s}' + LOGGER.debug(MSG.format(str(l3vpn_data))) + vpn_id = l3vpn_data['ietf-l3vpn-svc:l3vpn-svc']['vpn-services']['vpn-service'][0]['vpn-id'] try: - requests.put(url, json=l3vpn_data) - LOGGER.debug( - "[update_connectivity_service] l3vpn_data={:s}".format(str(l3vpn_data)) - ) - except requests.exceptions.ConnectionError: - raise Exception("faild to send post request to TFS 
L3VPN NBI") - - def delete_connectivity_service(self, service_uuid: str) -> None: - url = self._l3vpn_url + f"/vpn-service={service_uuid}" + self.put(L3VPN_URL + f'/vpn-service={vpn_id}', body=l3vpn_data) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send PUT request to TFS L3VPN NBI' + raise Exception(MSG) from e + + def delete_connectivity_service(self, service_uuid : str) -> None: + url = L3VPN_URL + f'/vpn-service={service_uuid}' + MSG = '[delete_connectivity_service] url={:s}' + LOGGER.debug(MSG.format(str(url))) try: - requests.delete(url, auth=self._auth) - LOGGER.debug("[delete_connectivity_service] url={:s}".format(str(url))) - except requests.exceptions.ConnectionError: - raise Exception("faild to send delete request to TFS L3VPN NBI") + self.delete(url) + except requests.exceptions.ConnectionError as e: + MSG = 'Failed to send DELETE request to TFS L3VPN NBI' + raise Exception(MSG) from e diff --git a/src/device/service/drivers/optical_tfs/OpticalTfsDriver.py b/src/device/service/drivers/optical_tfs/OpticalTfsDriver.py index 05c44d2d3a52d063ae9981c28af06d82fe1cfc73..d2d3ec3bb2b4ce6f5881251d7ba4f2e9f767b8fb 100644 --- a/src/device/service/drivers/optical_tfs/OpticalTfsDriver.py +++ b/src/device/service/drivers/optical_tfs/OpticalTfsDriver.py @@ -12,37 +12,44 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json, logging, requests, threading -from requests.auth import HTTPBasicAuth + +import json, logging, threading from typing import Any, Iterator, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.type_checkers.Checkers import chk_string, chk_type -from device.service.driver_api._Driver import _Driver -from . 
import ALL_RESOURCE_KEYS -from .Tools import find_key, add_lightpath, del_lightpath, get_lightpaths -from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS -from device.service.drivers.ietf_l2vpn.TfsApiClient import TfsApiClient +from device.service.driver_api._Driver import _Driver, RESOURCE_ENDPOINTS, RESOURCE_SERVICES from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum, get_import_topology +from .TfsApiClient import TfsApiClient +#from .TfsOpticalClient import TfsOpticalClient LOGGER = logging.getLogger(__name__) +ALL_RESOURCE_KEYS = [ + RESOURCE_ENDPOINTS, + RESOURCE_SERVICES, +] + DRIVER_NAME = 'optical_tfs' METRICS_POOL = MetricsPool('Device', 'Driver', labels={'driver': DRIVER_NAME}) - class OpticalTfsDriver(_Driver): - def __init__(self, address: str, port: int, **settings) -> None: - super().__init__(DRIVER_NAME, address, port, **settings) + def __init__(self, address : str, port : str, **settings) -> None: + super().__init__(DRIVER_NAME, address, int(port), **settings) self.__lock = threading.Lock() self.__started = threading.Event() self.__terminate = threading.Event() - username = self.settings.get('username') + username = self.settings.get('username') password = self.settings.get('password') - self.__auth = HTTPBasicAuth(username, password) if username is not None and password is not None else None - scheme = self.settings.get('scheme', 'http') - self.tac = TfsApiClient(self.address, int(self.port), scheme=scheme, username=username, password=password) - self.__base_url = '{:s}://{:s}:{:d}'.format(scheme, self.address, int(self.port)) - self.__timeout = int(self.settings.get('timeout', 120)) + scheme = self.settings.get('scheme', 'http') + timeout = int(self.settings.get('timeout', 60)) + self.tac = TfsApiClient( + self.address, self.port, scheme=scheme, username=username, + password=password, timeout=timeout + ) + #self.toc = TfsOpticalClient( + # self.address, int(self.port), scheme=scheme, username=username, + # password=password, timeout=timeout + #) # Options are: # disabled --> just import endpoints as usual @@ -51,19 +58,14 @@ class OpticalTfsDriver(_Driver): # topology --> imports sub-devices and links connecting them. 
         #              (not supported by XR driver)
         self.__import_topology = get_import_topology(self.settings, default=ImportTopologyEnum.TOPOLOGY)
 
     def Connect(self) -> bool:
-        url = self.__base_url + '/OpticalTFS/GetLightpaths'
         with self.__lock:
             if self.__started.is_set(): return True
             try:
-                requests.get(url, timeout=self.__timeout, verify=False, auth=self.__auth)
-            except requests.exceptions.Timeout:
-                LOGGER.exception('Timeout connecting {:s}'.format(str(self.__tapi_root)))
-                return False
-            except Exception:  # pylint: disable=broad-except
-                LOGGER.exception('Exception connecting {:s}'.format(str(self.__tapi_root)))
+                self.tac.check_credentials()
+            except: # pylint: disable=bare-except
+                LOGGER.exception('Exception checking credentials')
                 return False
             else:
                 self.__started.set()
@@ -80,72 +82,91 @@ class OpticalTfsDriver(_Driver):
         return []
 
     @metered_subclass_method(METRICS_POOL)
-    def GetConfig(self, resource_keys : List[str] = []) -> List[Tuple[str, Union[Any, None, Exception]]]:
+    def GetConfig(
+        self, resource_keys : List[str] = []
+    ) -> List[Tuple[str, Union[Any, None, Exception]]]:
         chk_type('resources', resource_keys, list)
         results = []
         with self.__lock:
+            self.tac.check_credentials()
             if len(resource_keys) == 0: resource_keys = ALL_RESOURCE_KEYS
             for i, resource_key in enumerate(resource_keys):
                 str_resource_name = 'resource_key[#{:d}]'.format(i)
-                chk_string(str_resource_name, resource_key, allow_empty=False)
-
-                if resource_key == RESOURCE_ENDPOINTS:
-                    # return endpoints through TFS NBI API and list-devices method
-                    results.extend(self.tac.get_devices_endpoints(self.__import_topology))
-
-                # results.extend(get_lightpaths(
-                #     self.__base_url, resource_key, timeout=self.__timeout, auth=self.__auth))
+                try:
+                    chk_string(str_resource_name, resource_key, allow_empty=False)
+                    if resource_key == RESOURCE_ENDPOINTS:
+                        # return endpoints through TFS NBI API and list-devices method
+                        results.extend(self.tac.get_devices_endpoints(self.__import_topology))
+                    elif resource_key == RESOURCE_SERVICES:
+                        # return all services through TFS NBI API
+                        results.extend(self.tac.get_services())
+                    else:
+                        MSG = 'ResourceKey({:s}) not implemented'
+                        LOGGER.warning(MSG.format(str(resource_key)))
+                except Exception as e:
+                    MSG = 'Unhandled error processing {:s}: resource_key({:s})'
+                    LOGGER.exception(MSG.format(str_resource_name, str(resource_key)))
+                    results.append((resource_key, e))
         return results
 
     @metered_subclass_method(METRICS_POOL)
-    def SetConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+    def SetConfig(
+        self, resources : List[Tuple[str, Any]]
+    ) -> List[Union[bool, Exception]]:
         results = []
-        if len(resources) == 0:
-            return results
+        if len(resources) == 0: return results
         with self.__lock:
-            for _, resource in resources:
+            self.tac.check_credentials()
+            for resource in resources:
                 LOGGER.info('resource = {:s}'.format(str(resource)))
-
-                src_node = find_key(resource, 'src_node')
-                dst_node = find_key(resource, 'dst_node')
-                bitrate = find_key(resource, 'bitrate')
-
-                response = add_lightpath(self.__base_url, src_node, dst_node, bitrate,
-                    auth=self.__auth, timeout=self.__timeout)
-
-                results.extend(response)
+                resource_key, resource_value = resource
+                try:
+                    resource_value = json.loads(resource_value)
+                    self.tac.setup_service(resource_value)
+                    results.append((resource_key, True))
+                except Exception as e:
+                    MSG = 'Unhandled error processing resource_key({:s})'
+                    LOGGER.exception(MSG.format(str(resource_key)))
+                    results.append((resource_key, e))
         return results
 
     @metered_subclass_method(METRICS_POOL)
-    def DeleteConfig(self, resources: List[Tuple[str, Any]]) -> List[Union[bool, Exception]]:
+    def DeleteConfig(
+        self, resources : List[Tuple[str, Any]]
+    ) -> List[Union[bool, Exception]]:
         results = []
-        if len(resources) == 0:
-            return results
+        if len(resources) == 0: return results
         with self.__lock:
-            for _, resource in resources:
+            self.tac.check_credentials()
+            for resource in resources:
                 LOGGER.info('resource = {:s}'.format(str(resource)))
-                flow_id = find_key(resource, 'flow_id')
-                src_node = find_key(resource, 'src_node')
-                dst_node = find_key(resource, 'dst_node')
-                bitrate = find_key(resource, 'bitrate')
-
-                response = del_lightpath(self.__base_url, flow_id, src_node, dst_node, bitrate)
-                results.extend(response)
-
+                resource_key, resource_value = resource
+                try:
+                    resource_value = json.loads(resource_value)
+                    self.tac.teardown_service(resource_value)
+                    results.append((resource_key, True))
+                except Exception as e:
+                    MSG = 'Unhandled error processing resource_key({:s})'
+                    LOGGER.exception(MSG.format(str(resource_key)))
+                    results.append((resource_key, e))
        return results
 
     @metered_subclass_method(METRICS_POOL)
-    def SubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
-        # Optical TFS does not support monitoring by now
+    def SubscribeState(
+        self, subscriptions : List[Tuple[str, float, float]]
+    ) -> List[Union[bool, Exception]]:
+        # TODO: monitoring is not supported for now
        return [False for _ in subscriptions]
 
     @metered_subclass_method(METRICS_POOL)
-    def UnsubscribeState(self, subscriptions : List[Tuple[str, float, float]]) -> List[Union[bool, Exception]]:
-        # Optical TFS does not support monitoring by now
+    def UnsubscribeState(
+        self, subscriptions : List[Tuple[str, float, float]]
+    ) -> List[Union[bool, Exception]]:
+        # TODO: monitoring is not supported for now
         return [False for _ in subscriptions]
 
     def GetState(
         self, blocking=False, terminate : Optional[threading.Event] = None
     ) -> Iterator[Tuple[float, str, Any]]:
-        # Optical TFS does not support monitoring by now
+        # TODO: monitoring is not supported for now
         return []
diff --git a/src/device/service/drivers/optical_tfs/TfsApiClient.py b/src/device/service/drivers/optical_tfs/TfsApiClient.py
new file mode 100644
index 0000000000000000000000000000000000000000..49c5a9e4f07026d8bcd5851770e9b225ab37fe63
--- /dev/null
+++ b/src/device/service/drivers/optical_tfs/TfsApiClient.py
@@ -0,0 +1,258 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
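+
+# Usage sketch (illustrative only; the address, port and printed fields below
+# are assumptions, not part of the TFS API contract):
+#
+#     client = TfsApiClient('127.0.0.1', 80, scheme='http', timeout=30)
+#     client.check_credentials()  # GET /tfs-api/context_ids
+#     for key, data in client.get_devices_endpoints(ImportTopologyEnum.TOPOLOGY):
+#         print(key, data)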
+ +import logging +from typing import Dict, List, Optional, Tuple +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum +from common.tools.client.RestClient import RestClient +from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id +from common.tools.object_factory.Service import json_service +from device.service.driver_api.ImportTopologyEnum import ImportTopologyEnum + +CONTEXT_IDS_URL = '/tfs-api/context_ids' +TOPOLOGY_URL = '/tfs-api/context/{context_uuid:s}/topology_details/{topology_uuid:s}' +SERVICES_URL = '/tfs-api/context/{context_uuid:s}/services' +SERVICE_URL = '/tfs-api/context/{context_uuid:s}/service/{service_uuid:s}' + +MAPPING_STATUS = { + 'DEVICEOPERATIONALSTATUS_UNDEFINED': 0, + 'DEVICEOPERATIONALSTATUS_DISABLED' : 1, + 'DEVICEOPERATIONALSTATUS_ENABLED' : 2, +} + +MAPPING_DRIVER = { + 'DEVICEDRIVER_UNDEFINED' : 0, + 'DEVICEDRIVER_OPENCONFIG' : 1, + 'DEVICEDRIVER_TRANSPORT_API' : 2, + 'DEVICEDRIVER_P4' : 3, + 'DEVICEDRIVER_IETF_NETWORK_TOPOLOGY': 4, + 'DEVICEDRIVER_ONF_TR_532' : 5, + 'DEVICEDRIVER_XR' : 6, + 'DEVICEDRIVER_IETF_L2VPN' : 7, + 'DEVICEDRIVER_GNMI_OPENCONFIG' : 8, + 'DEVICEDRIVER_OPTICAL_TFS' : 9, + 'DEVICEDRIVER_IETF_ACTN' : 10, + 'DEVICEDRIVER_OC' : 11, + 'DEVICEDRIVER_QKD' : 12, + 'DEVICEDRIVER_IETF_L3VPN' : 13, + 'DEVICEDRIVER_IETF_SLICE' : 14, + 'DEVICEDRIVER_NCE' : 15, +} + +LOGGER = logging.getLogger(__name__) + +class TfsApiClient(RestClient): + def __init__( + self, address : str, port : int, scheme : str = 'http', + username : Optional[str] = None, password : Optional[str] = None, + timeout : Optional[int] = 30 + ) -> None: + super().__init__( + address, port, scheme=scheme, username=username, password=password, + timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER + ) + + def check_credentials(self) -> None: + self.get(CONTEXT_IDS_URL) + LOGGER.info('Credentials checked') + + def get_devices_endpoints( + self, import_topology : ImportTopologyEnum = ImportTopologyEnum.DEVICES + ) -> List[Dict]: + LOGGER.debug('[get_devices_endpoints] begin') + MSG = '[get_devices_endpoints] import_topology={:s}' + LOGGER.debug(MSG.format(str(import_topology))) + + if import_topology == ImportTopologyEnum.DISABLED: + MSG = 'Unsupported import_topology mode: {:s}' + raise Exception(MSG.format(str(import_topology))) + + topology = self.get(TOPOLOGY_URL.format( + context_uuid=DEFAULT_CONTEXT_NAME, topology_uuid=DEFAULT_TOPOLOGY_NAME + )) + + result = list() + for json_device in topology['devices']: + device_uuid : str = json_device['device_id']['device_uuid']['uuid'] + device_type : str = json_device['device_type'] + #if not device_type.startswith('emu-'): device_type = 'emu-' + device_type + device_status = json_device['device_operational_status'] + device_url = '/devices/device[{:s}]'.format(device_uuid) + device_data = { + 'uuid': json_device['device_id']['device_uuid']['uuid'], + 'name': json_device['name'], + 'type': device_type, + 'status': MAPPING_STATUS[device_status], + 'drivers': [ + MAPPING_DRIVER[driver] + for driver in json_device['device_drivers'] + ], + } + result.append((device_url, device_data)) + + for json_endpoint in json_device['device_endpoints']: + endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']['uuid'] + 
endpoint_url = '/endpoints/endpoint[{:s}]'.format(endpoint_uuid) + endpoint_data = { + 'device_uuid': device_uuid, + 'uuid': endpoint_uuid, + 'name': json_endpoint['name'], + 'type': json_endpoint['endpoint_type'], + } + result.append((endpoint_url, endpoint_data)) + + if import_topology == ImportTopologyEnum.DEVICES: + LOGGER.debug('[get_devices_endpoints] devices only; returning') + return result + + for json_link in topology['links']: + link_uuid : str = json_link['link_id']['link_uuid']['uuid'] + link_url = '/links/link[{:s}]'.format(link_uuid) + link_endpoint_ids = [ + ( + json_endpoint_id['device_id']['device_uuid']['uuid'], + json_endpoint_id['endpoint_uuid']['uuid'], + ) + for json_endpoint_id in json_link['link_endpoint_ids'] + ] + link_data = { + 'uuid': json_link['link_id']['link_uuid']['uuid'], + 'name': json_link['name'], + 'endpoints': link_endpoint_ids, + } + result.append((link_url, link_data)) + + for json_link in topology['optical_links']: + link_uuid : str = json_link['link_id']['link_uuid']['uuid'] + link_url = '/links/link[{:s}]'.format(link_uuid) + link_endpoint_ids = [ + ( + json_endpoint_id['device_id']['device_uuid']['uuid'], + json_endpoint_id['endpoint_uuid']['uuid'], + ) + for json_endpoint_id in json_link['link_endpoint_ids'] + ] + link_data = { + 'uuid': json_link['link_id']['link_uuid']['uuid'], + 'name': json_link['name'], + 'endpoints': link_endpoint_ids, + } + result.append((link_url, link_data)) + + LOGGER.debug('[get_devices_endpoints] topology; returning') + return result + + def setup_service(self, resource_value : Dict) -> None: + service_uuid = resource_value['service_uuid' ] + service_name = resource_value['service_name' ] + src_device_uuid = resource_value['src_device_uuid' ] + src_endpoint_uuid = resource_value['src_endpoint_uuid'] + dst_device_uuid = resource_value['dst_device_uuid' ] + dst_endpoint_uuid = resource_value['dst_endpoint_uuid'] + bitrate = resource_value['bitrate' ] + bidir = resource_value['bidir' ] + ob_width = resource_value['ob_width' ] + + endpoint_ids = [ + json_endpoint_id(json_device_id(src_device_uuid), src_endpoint_uuid), + json_endpoint_id(json_device_id(dst_device_uuid), dst_endpoint_uuid), + ] + constraints = [ + json_constraint_custom('bandwidth[gbps]', str(bitrate)), + json_constraint_custom('bidirectionality', '1' if bidir else '0'), + ] + if service_name == 'IP1/PORT-xe1==IP2/PORT-xe1': + constraints.append(json_constraint_custom('optical-band-width[GHz]', str(ob_width))) + + service_add = json_service( + service_uuid, + ServiceTypeEnum.Name(ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY), + context_id = json_context_id(DEFAULT_CONTEXT_NAME), + name = service_name, + status = ServiceStatusEnum.Name(ServiceStatusEnum.SERVICESTATUS_PLANNED), + ) + services_url = SERVICES_URL.format(context_uuid=DEFAULT_CONTEXT_NAME) + service_ids = self.post(services_url, body=service_add) + assert len(service_ids) == 1 + service_id = service_ids[0] + service_uuid = service_id['service_uuid']['uuid'] + + service_upd = json_service( + service_uuid, + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, + context_id = json_context_id(DEFAULT_CONTEXT_NAME), + name = service_name, endpoint_ids = endpoint_ids, constraints = constraints, + status = ServiceStatusEnum.Name(ServiceStatusEnum.SERVICESTATUS_PLANNED), + ) + service_url = SERVICE_URL.format(context_uuid=DEFAULT_CONTEXT_NAME, service_uuid=service_uuid) + self.put(service_url, body=service_upd) + + def teardown_service(self, resource_value : Dict) -> None: + service_uuid = 
resource_value['service_uuid'] + service_name = resource_value['service_name'] + + service_url = SERVICE_URL.format(context_uuid=DEFAULT_CONTEXT_NAME, service_uuid=service_uuid) + self.delete(service_url) + if service_name == 'IP1/PORT-xe1==IP2/PORT-xe1': + self.delete(service_url) + + @staticmethod + def parse_service(service : Dict) -> Tuple[str, Dict]: + service_uuid = service['service_id']['service_uuid']['uuid'] + src_endpoint_id = service['service_endpoint_ids'][ 0] + dst_endpoint_id = service['service_endpoint_ids'][-1] + parsed_service = { + 'service_uuid' : service_uuid, + 'service_name' : service['name'], + 'src_device_uuid' : src_endpoint_id['device_id']['device_uuid']['uuid'], + 'src_endpoint_uuid': src_endpoint_id['endpoint_uuid']['uuid'], + 'dst_device_uuid' : dst_endpoint_id['device_id']['device_uuid']['uuid'], + 'dst_endpoint_uuid': dst_endpoint_id['endpoint_uuid']['uuid'], + } + + for constraint in service.get('service_constraints', list()): + if 'custom' not in constraint: continue + constraint_type = constraint['custom']['constraint_type'] + constraint_value = constraint['custom']['constraint_value'] + if constraint_type == 'bandwidth[gbps]': + parsed_service['bitrate'] = int(float(constraint_value)) + if constraint_type == 'bidirectionality': + parsed_service['bidir'] = int(constraint_value) == 1 + if constraint_type == 'optical-band-width[GHz]': + parsed_service['ob_width'] = int(constraint_value) + + resource_key = '/services/service[{:s}]'.format(service_uuid) + return resource_key, parsed_service + + def get_services(self) -> List[Tuple[str, Dict]]: + services_url = SERVICES_URL.format(context_uuid=DEFAULT_CONTEXT_NAME) + _services = self.get(services_url) + OPTICAL_CONNECTIVITY_SERVICE_TYPES = { + 'SERVICETYPE_OPTICAL_CONNECTIVITY', + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY + } + return [ + TfsApiClient.parse_service(service) + for service in _services['services'] + if service['service_type'] in OPTICAL_CONNECTIVITY_SERVICE_TYPES + ] + + def get_service(self, service_uuid : str) -> Tuple[str, Dict]: + service_url = SERVICE_URL.format(context_uuid=DEFAULT_CONTEXT_NAME, service_uuid=service_uuid) + service = self.get(service_url) + return TfsApiClient.parse_service(service) diff --git a/src/device/service/drivers/optical_tfs/TfsOpticalClient.py b/src/device/service/drivers/optical_tfs/TfsOpticalClient.py new file mode 100644 index 0000000000000000000000000000000000000000..f5749ae56b7ec4b2cb57ec2306bb4e648dba29c1 --- /dev/null +++ b/src/device/service/drivers/optical_tfs/TfsOpticalClient.py @@ -0,0 +1,100 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
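+
+# Usage sketch (illustrative only; node names and bitrate are assumptions):
+#
+#     client = TfsOpticalClient('127.0.0.1', 80, scheme='http', timeout=30)
+#     client.check_credentials()             # GET /OpticalTFS/GetLightpaths
+#     client.add_lightpath('t1', 't2', 100)  # PUT /OpticalTFS/AddLightpath/t1/t2/100
+#     for key, lightpath in client.get_lightpaths():
+#         print(key, lightpath['bitrate'])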
+
+
+import logging, requests
+from typing import Dict, List, Optional, Union
+from common.tools.client.RestClient import RestClient
+
+
+LOGGER = logging.getLogger(__name__)
+
+GET_OPTICAL_LINKS_URL = '/OpticalTFS/GetLinks'
+GET_LIGHTPATHS_URL    = '/OpticalTFS/GetLightpaths'
+ADD_LIGHTPATH_URL     = '/OpticalTFS/AddLightpath/{src_node:s}/{dst_node:s}/{bitrate:s}'
+DEL_LIGHTPATH_URL     = '/OpticalTFS/DelLightpath/{flow_id:s}/{src_node:s}/{dst_node:s}/{bitrate:s}'
+
+
+class TfsOpticalClient(RestClient):
+    def __init__(
+        self, address : str, port : int, scheme : str = 'http',
+        username : Optional[str] = None, password : Optional[str] = None,
+        timeout : Optional[int] = 30
+    ) -> None:
+        super().__init__(
+            address, port, scheme=scheme, username=username, password=password,
+            timeout=timeout, verify_certs=False, allow_redirects=True, logger=LOGGER
+        )
+
+    def check_credentials(self) -> None:
+        self.get(GET_LIGHTPATHS_URL, expected_status_codes={requests.codes['OK']})
+        LOGGER.info('Credentials checked')
+
+    def get_optical_links(self) -> Union[List[Dict], Exception]:
+        try:
+            return self.get(GET_OPTICAL_LINKS_URL, expected_status_codes={requests.codes['OK']})
+        except Exception as e:
+            LOGGER.exception('Exception retrieving optical links')
+            return e
+
+    def get_lightpaths(self) -> Union[List[Dict], Exception]:
+        try:
+            lightpaths : List[Dict] = self.get(
+                GET_LIGHTPATHS_URL, expected_status_codes={requests.codes['OK']}
+            )
+        except Exception as e:
+            LOGGER.exception('Exception retrieving lightpaths')
+            return e
+
+        result = []
+        for lightpath in lightpaths:
+            assert 'flow_id' in lightpath
+            assert 'src'     in lightpath
+            assert 'dst'     in lightpath
+            assert 'bitrate' in lightpath
+            resource_key = '/lightpaths/lightpath[{:s}]'.format(lightpath['flow_id'])
+            result.append((resource_key, lightpath))
+        return result
+
+    def add_lightpath(
+        self, src_node : str, dst_node : str, bitrate : int
+    ) -> Union[List[Dict], Exception]:
+        MSG = 'Add Lightpath: {:s} <-> {:s} with {:d} bitrate'
+        LOGGER.info(MSG.format(str(src_node), str(dst_node), int(bitrate)))
+        # the '{bitrate:s}' placeholder requires a string value
+        request_endpoint = ADD_LIGHTPATH_URL.format(
+            src_node=str(src_node), dst_node=str(dst_node), bitrate=str(bitrate)
+        )
+        expected_status_codes = {requests.codes['CREATED'], requests.codes['NO_CONTENT']}
+        try:
+            return self.put(request_endpoint, expected_status_codes=expected_status_codes)
+        except Exception as e:
+            MSG = 'Exception requesting Lightpath: {:s} <-> {:s} with {:s} bitrate'
+            LOGGER.exception(MSG.format(str(src_node), str(dst_node), str(bitrate)))
+            return e
+
+    def del_lightpath(
+        self, flow_id : str, src_node : str, dst_node : str, bitrate : int
+    ) -> Union[List[Dict], Exception]:
+        MSG = 'Delete Lightpath {:s}: {:s} <-> {:s} with {:d} bitrate'
+        LOGGER.info(MSG.format(str(flow_id), str(src_node), str(dst_node), int(bitrate)))
+        # DEL_LIGHTPATH_URL takes four string placeholders, including flow_id
+        request_endpoint = DEL_LIGHTPATH_URL.format(
+            flow_id=str(flow_id), src_node=str(src_node), dst_node=str(dst_node), bitrate=str(bitrate)
+        )
+        expected_status_codes = {requests.codes['NO_CONTENT']}
+        try:
+            return self.delete(request_endpoint, expected_status_codes=expected_status_codes)
+        except Exception as e:
+            MSG = 'Exception deleting Lightpath {:s}: {:s} <-> {:s} with {:s} bitrate'
+            LOGGER.exception(MSG.format(str(flow_id), str(src_node), str(dst_node), str(bitrate)))
+            return e
diff --git a/src/device/service/drivers/optical_tfs/Tools.py b/src/device/service/drivers/optical_tfs/Tools.py
deleted file mode 100644
index 3714672f8b9f892ee6e6b51a5e0f56b37bf3967e..0000000000000000000000000000000000000000
--- 
a/src/device/service/drivers/optical_tfs/Tools.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import json, logging, requests -from requests.auth import HTTPBasicAuth -from typing import Optional - -LOGGER = logging.getLogger(__name__) - -HTTP_OK_CODES = { - 200, # OK - 201, # Created - 202, # Accepted - 204, # No Content -} - -def find_key(resource, key): - return json.loads(resource[1])[key] - -def get_lightpaths(root_url : str, resource_key : str,auth : Optional[HTTPBasicAuth] = None, - timeout : Optional[int] = None): - headers = {'accept': 'application/json'} - url = '{:s}/OpticalTFS/GetLightpaths'.format(root_url) - - result = [] - try: - response = requests.get(url, timeout=timeout, headers=headers, verify=False, auth=auth) - except requests.exceptions.Timeout: - LOGGER.exception('Timeout connecting {:s}'.format(url)) - return result - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception retrieving {:s}'.format(resource_key)) - result.append((resource_key, e)) - return result - - try: - flows = json.loads(response.content) - except Exception as e: # pylint: disable=broad-except - LOGGER.warning('Unable to decode reply: {:s}'.format(str(response.content))) - result.append((resource_key, e)) - return result - - for flow in flows: - flow_id = flow.get('flow_id') - source = flow.get('src') - destination = flow.get('dst') - bitrate = flow.get('bitrate') - - endpoint_url = '/flows/flow[{:s}]'.format(flow_id) - endpoint_data = {'flow_id': flow_id, 'src': source, 'dst': destination, 'bitrate': bitrate} - result.append((endpoint_url, endpoint_data)) - - return result - - -def add_lightpath(root_url, src_node, dst_node, bitrate, - auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None): - - headers = {'accept': 'application/json'} - url = '{:s}/OpticalTFS/AddLightpath/{:s}/{:s}/{:s}'.format( - root_url, src_node, dst_node, bitrate) - - results = [] - try: - LOGGER.info('Lightpath request: {:s} <-> {:s} with {:s} bitrate'.format( - str(src_node), str(dst_node), str(bitrate))) - response = requests.put(url=url, timeout=timeout, headers=headers, verify=False, auth=auth) - results.append(response.json()) - LOGGER.info('Response: {:s}'.format(str(response))) - - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception requesting Lightpath: {:s} <-> {:s} with {:s} bitrate'.format( - str(src_node), str(dst_node), str(bitrate))) - results.append(e) - else: - if response.status_code not in HTTP_OK_CODES: - msg = 'Could not create Lightpath(status_code={:s} reply={:s}' - LOGGER.error(msg.format(str(response.status_code), str(response))) - results.append(response.status_code in HTTP_OK_CODES) - - return results - - - -def del_lightpath(root_url, flow_id, src_node, dst_node, bitrate, - auth : Optional[HTTPBasicAuth] = None, timeout : Optional[int] = None): - url = '{:s}/OpticalTFS/DelLightpath/{:s}/{:s}/{:s}/{:s}'.format( - 
root_url, flow_id, src_node, dst_node, bitrate) - headers = {'accept': 'application/json'} - - results = [] - - try: - response = requests.delete( - url=url, timeout=timeout, headers=headers, verify=False, auth=auth) - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception deleting Lightpath(uuid={:s})'.format(str(flow_id))) - results.append(e) - else: - if response.status_code not in HTTP_OK_CODES: - msg = 'Could not delete Lightpath(flow_id={:s}). status_code={:s} reply={:s}' - LOGGER.error(msg.format(str(flow_id), str(response.status_code), str(response))) - results.append(response.status_code in HTTP_OK_CODES) - - return results - - -def get_topology(root_url : str, resource_key : str,auth : Optional[HTTPBasicAuth] = None, - timeout : Optional[int] = None): - headers = {'accept': 'application/json'} - url = '{:s}/OpticalTFS/GetLinks'.format(root_url) - - result = [] - try: - response = requests.get(url, timeout=timeout, headers=headers, verify=False, auth=auth) - except requests.exceptions.Timeout: - LOGGER.exception('Timeout connecting {:s}'.format(url)) - return result - except Exception as e: # pylint: disable=broad-except - LOGGER.exception('Exception retrieving {:s}'.format(resource_key)) - result.append((resource_key, e)) - return result - - try: - response = json.loads(response.content) - except Exception as e: # pylint: disable=broad-except - LOGGER.warning('Unable to decode reply: {:s}'.format(str(response.content))) - result.append((resource_key, e)) - return result - - result.append(response) - return result diff --git a/src/device/service/drivers/optical_tfs/__init__.py b/src/device/service/drivers/optical_tfs/__init__.py index 97ec0dd4209f253161cddf69344bd62933a81fd7..53d5157f750bfb085125cbd33faff1cec5924e14 100644 --- a/src/device/service/drivers/optical_tfs/__init__.py +++ b/src/device/service/drivers/optical_tfs/__init__.py @@ -12,9 +12,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -from device.service.driver_api._Driver import RESOURCE_ENDPOINTS, RESOURCE_SERVICES - -ALL_RESOURCE_KEYS = [ - RESOURCE_ENDPOINTS, - RESOURCE_SERVICES, -] diff --git a/src/device/tests/test_unitary_ietf_l3vpn.py b/src/device/tests/test_unitary_ietf_l3vpn.py index 728ca691332c8abee7b5d6f5ad6c151240e540ed..f9f7ae99070f6a55137be0924eb4e69863b86fbe 100644 --- a/src/device/tests/test_unitary_ietf_l3vpn.py +++ b/src/device/tests/test_unitary_ietf_l3vpn.py @@ -3,7 +3,7 @@ from json import dumps import requests -from device.service.drivers.ietf_l3vpn.driver import IetfL3VpnDriver +from device.service.drivers.ietf_l3vpn.IetfL3VpnDriver import IetfL3VpnDriver from device.service.Tools import RESOURCE_ENDPOINTS settings = { diff --git a/src/e2e_orchestrator/requirements.in b/src/e2e_orchestrator/requirements.in index 53f9028a7ed13cfc9d8949ef739e2c96af544797..c9286e2e13add39d977957141cedfae8e23e4f72 100644 --- a/src/e2e_orchestrator/requirements.in +++ b/src/e2e_orchestrator/requirements.in @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-networkx
-websockets==12.0
+networkx==3.2.1
+python-socketio==5.12.1
 requests==2.27.*
+websocket-client==1.8.0 # used by socketio to upgrade to websocket
diff --git a/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py b/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py
index 4878d4788276857600478b20c060c75e29d39015..2284abe4cf578d58310cf8666b9b67896e81e199 100644
--- a/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py
+++ b/src/e2e_orchestrator/service/E2EOrchestratorServiceServicerImpl.py
@@ -12,229 +12,49 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import copy, grpc, json, logging, networkx, requests, threading
+import copy, grpc, logging, networkx
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
 from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest, E2EOrchestratorReply
-from common.proto.context_pb2 import (
-    Empty, Connection, EndPointId, Link, LinkId, TopologyDetails, Topology, Context, Service, ServiceId,
-    ServiceTypeEnum, ServiceStatusEnum)
+from common.proto.context_pb2 import Empty, Connection, EndPointId
 from common.proto.e2eorchestrator_pb2_grpc import E2EOrchestratorServiceServicer
-from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest
-from common.tools.grpc.Tools import grpc_message_to_json_string
-from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
-from common.Settings import get_setting
 from context.client.ContextClient import ContextClient
 from context.service.database.uuids.EndPoint import endpoint_get_uuid
-from context.service.database.uuids.Device import device_get_uuid
-from service.client.ServiceClient import ServiceClient
-from websockets.sync.client import connect
-from websockets.sync.server import serve
-
 
 LOGGER = logging.getLogger(__name__)
-logging.getLogger("websockets").propagate = True
-logging.getLogger("requests.packages.urllib3").propagate = True
 
 METRICS_POOL = MetricsPool("E2EOrchestrator", "RPC")
-
-context_client: ContextClient = ContextClient()
-service_client: ServiceClient = ServiceClient()
-
-EXT_HOST = str(get_setting('WS_IP_HOST'))
-EXT_PORT = int(get_setting('WS_IP_PORT'))
-EXT_URL = 'ws://{:s}:{:d}'.format(EXT_HOST, EXT_PORT)
-
-OWN_HOST = str(get_setting('WS_E2E_HOST'))
-OWN_PORT = int(get_setting('WS_E2E_PORT'))
-
-ALL_HOSTS = '0.0.0.0'
-
-class SubscriptionServer(threading.Thread):
-    def run(self):
-        request = VNTSubscriptionRequest()
-        request.host = OWN_HOST
-        request.port = OWN_PORT
-        try:
-            LOGGER.debug('Trying to connect to {:s}'.format(EXT_URL))
-            websocket = connect(EXT_URL)
-        except: # pylint: disable=bare-except
-            LOGGER.exception('Error connecting to {:s}'.format(EXT_URL))
-        else:
-            with websocket:
-                LOGGER.debug('Connected to {:s}'.format(EXT_URL))
-                send = grpc_message_to_json_string(request)
-                websocket.send(send)
-                LOGGER.debug('Sent: {:s}'.format(send))
-                try:
-                    message = websocket.recv()
-                    LOGGER.debug('Received message from WebSocket: {:s}'.format(message))
-                except Exception as ex:
-                    LOGGER.error('Exception receiving from WebSocket: {:s}'.format(ex))
-                self._events_server()
-
-
-    def _events_server(self):
-        try:
-            server = serve(self._event_received, ALL_HOSTS, int(OWN_PORT))
-        except: # pylint: disable=bare-except
-            LOGGER.exception('Error starting server on {:s}:{:d}'.format(ALL_HOSTS, OWN_PORT))
-        else:
-            with server:
-                LOGGER.info('Running events server...: {:s}:{:d}'.format(ALL_HOSTS, OWN_PORT))
-                server.serve_forever()
-
- - def _event_received(self, connection): - LOGGER.debug('Event received') - for message in connection: - message_json = json.loads(message) - - # Link creation - if 'link_id' in message_json: - LOGGER.debug('Link creation') - link = Link(**message_json) - - service = Service() - service.service_id.service_uuid.uuid = link.link_id.link_uuid.uuid - service.service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME - service.service_type = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY - service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED - service_client.CreateService(service) - - a_device_uuid = device_get_uuid(link.link_endpoint_ids[0].device_id) - a_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[0])[2] - z_device_uuid = device_get_uuid(link.link_endpoint_ids[1].device_id) - z_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[1])[2] - - links = context_client.ListLinks(Empty()).links - for _link in links: - for _endpoint_id in _link.link_endpoint_ids: - if _endpoint_id.device_id.device_uuid.uuid == a_device_uuid and \ - _endpoint_id.endpoint_uuid.uuid == a_endpoint_uuid: - a_ep_id = _endpoint_id - elif _endpoint_id.device_id.device_uuid.uuid == z_device_uuid and \ - _endpoint_id.endpoint_uuid.uuid == z_endpoint_uuid: - z_ep_id = _endpoint_id - - if (not 'a_ep_id' in locals()) or (not 'z_ep_id' in locals()): - error_msg = f'Could not get VNT link endpoints\ - \n\ta_endpoint_uuid= {a_endpoint_uuid}\ - \n\tz_endpoint_uuid= {z_device_uuid}' - LOGGER.error(error_msg) - connection.send(error_msg) - return - - service.service_endpoint_ids.append(copy.deepcopy(a_ep_id)) - service.service_endpoint_ids.append(copy.deepcopy(z_ep_id)) - - service_client.UpdateService(service) - re_svc = context_client.GetService(service.service_id) - connection.send(grpc_message_to_json_string(link)) - context_client.SetLink(link) - elif 'link_uuid' in message_json: - LOGGER.debug('Link removal') - link_id = LinkId(**message_json) - - service_id = ServiceId() - service_id.service_uuid.uuid = link_id.link_uuid.uuid - service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME - service_client.DeleteService(service_id) - connection.send(grpc_message_to_json_string(link_id)) - context_client.RemoveLink(link_id) - else: - LOGGER.debug('Topology received') - topology_details = TopologyDetails(**message_json) - - context = Context() - context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid - context_client.SetContext(context) - - topology = Topology() - topology.topology_id.context_id.CopyFrom(context.context_id) - topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid - context_client.SetTopology(topology) - - for device in topology_details.devices: - context_client.SetDevice(device) - - for link in topology_details.links: - context_client.SetLink(link) - - - class E2EOrchestratorServiceServicerImpl(E2EOrchestratorServiceServicer): def __init__(self): LOGGER.debug('Creating Servicer...') - try: - LOGGER.debug('Requesting subscription') - sub_server = SubscriptionServer() - sub_server.start() - LOGGER.debug('Servicer Created') - self.retrieve_external_topologies() - except: - LOGGER.exception('Unhandled Exception') - - def retrieve_external_topologies(self): - i = 1 - while True: - try: - ADD = str(get_setting(f'EXT_CONTROLLER{i}_ADD')) - PORT = int(get_setting(f'EXT_CONTROLLER{i}_PORT')) - except: # pylint: disable=bare-except - break + LOGGER.debug('Servicer Created') - try: - 
LOGGER.info('Retrieving external controller #{:d}'.format(i)) - url = 'http://{:s}:{:d}/tfs-api/context/{:s}/topology_details/{:s}'.format( - ADD, PORT, DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME - ) - LOGGER.info('url={:s}'.format(str(url))) - topo = requests.get(url).json() - LOGGER.info('Retrieved external controller #{:d}'.format(i)) - except: # pylint: disable=bare-except - LOGGER.exception('Exception retrieven topology from external controler #{:d}'.format(i)) - - topology_details = TopologyDetails(**topo) - context = Context() - context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid - context_client.SetContext(context) - - topology = Topology() - topology.topology_id.context_id.CopyFrom(context.context_id) - topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid - context_client.SetTopology(topology) - - for device in topology_details.devices: - context_client.SetDevice(device) - - for link in topology_details.links: - context_client.SetLink(link) - - i+=1 - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def Compute(self, request: E2EOrchestratorRequest, context: grpc.ServicerContext) -> E2EOrchestratorReply: - endpoints_ids = [] - for endpoint_id in request.service.service_endpoint_ids: - endpoints_ids.append(endpoint_get_uuid(endpoint_id)[2]) + def Compute( + self, request: E2EOrchestratorRequest, context: grpc.ServicerContext + ) -> E2EOrchestratorReply: + endpoints_ids = [ + endpoint_get_uuid(endpoint_id)[2] + for endpoint_id in request.service.service_endpoint_ids + ] graph = networkx.Graph() + context_client = ContextClient() devices = context_client.ListDevices(Empty()).devices - for device in devices: - endpoints_uuids = [endpoint.endpoint_id.endpoint_uuid.uuid - for endpoint in device.device_endpoints] + endpoints_uuids = [ + endpoint.endpoint_id.endpoint_uuid.uuid + for endpoint in device.device_endpoints + ] for ep in endpoints_uuids: graph.add_node(ep) - for ep in endpoints_uuids: - for ep_i in endpoints_uuids: - if ep == ep_i: + for ep_i in endpoints_uuids: + for ep_j in endpoints_uuids: + if ep_i == ep_j: continue - graph.add_edge(ep, ep_i) + graph.add_edge(ep_i, ep_j) links = context_client.ListLinks(Empty()).links for link in links: @@ -244,7 +64,9 @@ class E2EOrchestratorServiceServicerImpl(E2EOrchestratorServiceServicer): graph.add_edge(eps[0], eps[1]) - shortest = networkx.shortest_path(graph, endpoints_ids[0], endpoints_ids[1]) + shortest = networkx.shortest_path( + graph, endpoints_ids[0], endpoints_ids[1] + ) path = E2EOrchestratorReply() path.services.append(copy.deepcopy(request.service)) diff --git a/src/e2e_orchestrator/service/SubscriptionServer.py b/src/e2e_orchestrator/service/SubscriptionServer.py new file mode 100644 index 0000000000000000000000000000000000000000..ab1c37dbdaef656c0babf9eee896f15ff76053a8 --- /dev/null +++ b/src/e2e_orchestrator/service/SubscriptionServer.py @@ -0,0 +1,162 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy, grpc, json, logging, networkx, requests, threading
+from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest, E2EOrchestratorReply
+from common.proto.context_pb2 import (
+    Empty, Connection, EndPointId, Link, LinkId, TopologyDetails, Topology, Context, Service, ServiceId,
+    ServiceTypeEnum, ServiceStatusEnum)
+from common.proto.e2eorchestrator_pb2_grpc import E2EOrchestratorServiceServicer
+from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+from common.Settings import get_setting
+from context.client.ContextClient import ContextClient
+from context.service.database.uuids.EndPoint import endpoint_get_uuid
+from context.service.database.uuids.Device import device_get_uuid
+from service.client.ServiceClient import ServiceClient
+from websockets.sync.client import connect
+from websockets.sync.server import serve
+
+
+LOGGER = logging.getLogger(__name__)
+logging.getLogger("websockets").propagate = True
+logging.getLogger("requests.packages.urllib3").propagate = True
+
+METRICS_POOL = MetricsPool("E2EOrchestrator", "RPC")
+
+
+context_client: ContextClient = ContextClient()
+service_client: ServiceClient = ServiceClient()
+
+EXT_HOST = str(get_setting('WS_IP_HOST'))
+EXT_PORT = int(get_setting('WS_IP_PORT'))
+EXT_URL = 'ws://{:s}:{:d}'.format(EXT_HOST, EXT_PORT)
+
+OWN_HOST = str(get_setting('WS_E2E_HOST'))
+OWN_PORT = int(get_setting('WS_E2E_PORT'))
+
+ALL_HOSTS = '0.0.0.0'
+
+class SubscriptionServer(threading.Thread):
+    def run(self):
+        request = VNTSubscriptionRequest()
+        request.host = OWN_HOST
+        request.port = OWN_PORT
+        try:
+            LOGGER.debug('Trying to connect to {:s}'.format(EXT_URL))
+            websocket = connect(EXT_URL)
+        except: # pylint: disable=bare-except
+            LOGGER.exception('Error connecting to {:s}'.format(EXT_URL))
+        else:
+            with websocket:
+                LOGGER.debug('Connected to {:s}'.format(EXT_URL))
+                send = grpc_message_to_json_string(request)
+                websocket.send(send)
+                LOGGER.debug('Sent: {:s}'.format(send))
+                try:
+                    message = websocket.recv()
+                    LOGGER.debug('Received message from WebSocket: {:s}'.format(message))
+                except Exception as ex:
+                    # Exception objects do not support the ':s' format spec; convert explicitly
+                    LOGGER.error('Exception receiving from WebSocket: {:s}'.format(str(ex)))
+                self._events_server()
+
+
+    def _events_server(self):
+        try:
+            server = serve(self._event_received, ALL_HOSTS, int(OWN_PORT))
+        except: # pylint: disable=bare-except
+            LOGGER.exception('Error starting server on {:s}:{:d}'.format(ALL_HOSTS, OWN_PORT))
+        else:
+            with server:
+                LOGGER.info('Running events server...: {:s}:{:d}'.format(ALL_HOSTS, OWN_PORT))
+                server.serve_forever()
+
+
+    def _event_received(self, connection):
+        LOGGER.debug('Event received')
+        for message in connection:
+            message_json = json.loads(message)
+
+            # Link creation
+            if 'link_id' in message_json:
+                LOGGER.debug('Link creation')
+                link = Link(**message_json)
+
+                service = Service()
+                service.service_id.service_uuid.uuid = link.link_id.link_uuid.uuid
+                service.service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+                service.service_type = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY
+                service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED
+                service_client.CreateService(service)
+
+                a_device_uuid = device_get_uuid(link.link_endpoint_ids[0].device_id)
+                a_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[0])[2]
+                z_device_uuid = device_get_uuid(link.link_endpoint_ids[1].device_id)
+                z_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[1])[2]
+
+                links = context_client.ListLinks(Empty()).links
+                for _link in links:
+                    for _endpoint_id in _link.link_endpoint_ids:
+                        if _endpoint_id.device_id.device_uuid.uuid == a_device_uuid and \
+                            _endpoint_id.endpoint_uuid.uuid == a_endpoint_uuid:
+                            a_ep_id = _endpoint_id
+                        elif _endpoint_id.device_id.device_uuid.uuid == z_device_uuid and \
+                            _endpoint_id.endpoint_uuid.uuid == z_endpoint_uuid:
+                            z_ep_id = _endpoint_id
+
+                if (not 'a_ep_id' in locals()) or (not 'z_ep_id' in locals()):
+                    error_msg = f'Could not get VNT link endpoints\
+                        \n\ta_endpoint_uuid= {a_endpoint_uuid}\
+                        \n\tz_endpoint_uuid= {z_endpoint_uuid}'
+                    LOGGER.error(error_msg)
+                    connection.send(error_msg)
+                    return
+
+                service.service_endpoint_ids.append(copy.deepcopy(a_ep_id))
+                service.service_endpoint_ids.append(copy.deepcopy(z_ep_id))
+
+                service_client.UpdateService(service)
+                re_svc = context_client.GetService(service.service_id)
+                connection.send(grpc_message_to_json_string(link))
+                context_client.SetLink(link)
+            elif 'link_uuid' in message_json:
+                LOGGER.debug('Link removal')
+                link_id = LinkId(**message_json)
+
+                service_id = ServiceId()
+                service_id.service_uuid.uuid = link_id.link_uuid.uuid
+                service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME
+                service_client.DeleteService(service_id)
+                connection.send(grpc_message_to_json_string(link_id))
+                context_client.RemoveLink(link_id)
+            else:
+                LOGGER.debug('Topology received')
+                topology_details = TopologyDetails(**message_json)
+
+                context = Context()
+                context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid
+                context_client.SetContext(context)
+
+                topology = Topology()
+                topology.topology_id.context_id.CopyFrom(context.context_id)
+                topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid
+                context_client.SetTopology(topology)
+
+                for device in topology_details.devices:
+                    context_client.SetDevice(device)
+
+                for link in topology_details.links:
+                    context_client.SetLink(link)
diff --git a/src/e2e_orchestrator/service/TopologyTools.py b/src/e2e_orchestrator/service/TopologyTools.py
new file mode 100644
index 0000000000000000000000000000000000000000..eaa0e2dbb7c839dde67040fa7ee89ab568a57a78
--- /dev/null
+++ b/src/e2e_orchestrator/service/TopologyTools.py
@@ -0,0 +1,37 @@
+    def retrieve_external_topologies(self):
+        i = 1
+        while True:
+            try:
+                ADD = str(get_setting(f'EXT_CONTROLLER{i}_ADD'))
+                PORT = int(get_setting(f'EXT_CONTROLLER{i}_PORT'))
+            except: # pylint: disable=bare-except
+                break
+
+            try:
+                LOGGER.info('Retrieving external controller #{:d}'.format(i))
+                url = 'http://{:s}:{:d}/tfs-api/context/{:s}/topology_details/{:s}'.format(
+                    ADD, PORT, DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME
+                )
+                LOGGER.info('url={:s}'.format(str(url)))
+                topo = requests.get(url).json()
+                LOGGER.info('Retrieved external controller #{:d}'.format(i))
+            except: # pylint: disable=bare-except
+                LOGGER.exception('Exception retrieving topology from external controller #{:d}'.format(i))
+
+            topology_details = TopologyDetails(**topo)
+            context = Context()
+            context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid
+            context_client.SetContext(context)
+
+            topology = Topology()
+
topology.topology_id.context_id.CopyFrom(context.context_id) + topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid + context_client.SetTopology(topology) + + for device in topology_details.devices: + context_client.SetDevice(device) + + for link in topology_details.links: + context_client.SetLink(link) + + i+=1 diff --git a/src/e2e_orchestrator/service/__main__.py b/src/e2e_orchestrator/service/__main__.py index 4c0a6d471e2b6d7ae87aee666695ebdca6938491..aa4def383265ee1fc259fb8a7ec439059785e3f6 100644 --- a/src/e2e_orchestrator/service/__main__.py +++ b/src/e2e_orchestrator/service/__main__.py @@ -12,59 +12,68 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging -import signal -import sys -import threading - +import logging, signal, sys, threading from prometheus_client import start_http_server - from common.Constants import ServiceNameEnum -from common.Settings import (ENVVAR_SUFIX_SERVICE_HOST, - ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, - get_log_level, get_metrics_port, - wait_for_environment_variables) - +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, + get_log_level, get_metrics_port, wait_for_environment_variables +) +from .subscriptions.ControllerDiscoverer import ControllerDiscoverer +from .subscriptions.Subscriptions import Subscriptions +from .subscriptions.dispatchers.Dispatchers import Dispatchers +from .subscriptions.dispatchers.recommendation.Dispatcher import RecommendationDispatcher from .E2EOrchestratorService import E2EOrchestratorService -terminate = threading.Event() +TERMINATE = threading.Event() LOG_LEVEL = get_log_level() logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") LOGGER = logging.getLogger(__name__) -def signal_handler(signal, frame): # pylint: disable=redefined-outer-name - LOGGER.warning("Terminate signal received") - terminate.set() +def signal_handler(signal, frame): # pylint: disable=redefined-outer-name + LOGGER.warning('Terminate signal received') + TERMINATE.set() def main(): - signal.signal(signal.SIGINT, signal_handler) + wait_for_environment_variables([ + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), + get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), + ]) + + signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) - LOGGER.info("Starting...") + LOGGER.info('Starting...') # Start metrics server metrics_port = get_metrics_port() start_http_server(metrics_port) - # Starting CentralizedCybersecurity service + # Starting service grpc_service = E2EOrchestratorService() grpc_service.start() - LOGGER.info("Started...") - # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=1): - pass + dispatchers = Dispatchers(TERMINATE) + dispatchers.add_dispatcher(RecommendationDispatcher) + subscriptions = Subscriptions(dispatchers, TERMINATE) + discoverer = ControllerDiscoverer(subscriptions, TERMINATE) + discoverer.start() + + LOGGER.info('Running...') + # Wait for Ctrl+C or termination signal + while not TERMINATE.wait(timeout=1.0): pass - LOGGER.info("Terminating...") + LOGGER.info('Terminating...') + discoverer.stop() grpc_service.stop() - LOGGER.info("Bye") + LOGGER.info('Bye') return 0 -if __name__ == "__main__": +if __name__ == '__main__': sys.exit(main()) diff --git 
a/src/e2e_orchestrator/service/old_E2EOrchestratorServiceServicerImpl.py b/src/e2e_orchestrator/service/old_E2EOrchestratorServiceServicerImpl.py new file mode 100644 index 0000000000000000000000000000000000000000..4878d4788276857600478b20c060c75e29d39015 --- /dev/null +++ b/src/e2e_orchestrator/service/old_E2EOrchestratorServiceServicerImpl.py @@ -0,0 +1,268 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, grpc, json, logging, networkx, requests, threading +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest, E2EOrchestratorReply +from common.proto.context_pb2 import ( + Empty, Connection, EndPointId, Link, LinkId, TopologyDetails, Topology, Context, Service, ServiceId, + ServiceTypeEnum, ServiceStatusEnum) +from common.proto.e2eorchestrator_pb2_grpc import E2EOrchestratorServiceServicer +from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.Settings import get_setting +from context.client.ContextClient import ContextClient +from context.service.database.uuids.EndPoint import endpoint_get_uuid +from context.service.database.uuids.Device import device_get_uuid +from service.client.ServiceClient import ServiceClient +from websockets.sync.client import connect +from websockets.sync.server import serve + + +LOGGER = logging.getLogger(__name__) +logging.getLogger("websockets").propagate = True +logging.getLogger("requests.packages.urllib3").propagate = True + +METRICS_POOL = MetricsPool("E2EOrchestrator", "RPC") + + +context_client: ContextClient = ContextClient() +service_client: ServiceClient = ServiceClient() + +EXT_HOST = str(get_setting('WS_IP_HOST')) +EXT_PORT = int(get_setting('WS_IP_PORT')) +EXT_URL = 'ws://{:s}:{:d}'.format(EXT_HOST, EXT_PORT) + +OWN_HOST = str(get_setting('WS_E2E_HOST')) +OWN_PORT = int(get_setting('WS_E2E_PORT')) + +ALL_HOSTS = '0.0.0.0' + +class SubscriptionServer(threading.Thread): + def run(self): + request = VNTSubscriptionRequest() + request.host = OWN_HOST + request.port = OWN_PORT + try: + LOGGER.debug('Trying to connect to {:s}'.format(EXT_URL)) + websocket = connect(EXT_URL) + except: # pylint: disable=bare-except + LOGGER.exception('Error connecting to {:s}'.format(EXT_URL)) + else: + with websocket: + LOGGER.debug('Connected to {:s}'.format(EXT_URL)) + send = grpc_message_to_json_string(request) + websocket.send(send) + LOGGER.debug('Sent: {:s}'.format(send)) + try: + message = websocket.recv() + LOGGER.debug('Received message from WebSocket: {:s}'.format(message)) + except Exception as ex: + LOGGER.error('Exception receiving from WebSocket: {:s}'.format(ex)) + self._events_server() + + + def _events_server(self): + try: + server = serve(self._event_received, ALL_HOSTS, int(OWN_PORT)) + except: # pylint: 
disable=bare-except + LOGGER.exception('Error starting server on {:s}:{:d}'.format(ALL_HOSTS, OWN_PORT)) + else: + with server: + LOGGER.info('Running events server...: {:s}:{:d}'.format(ALL_HOSTS, OWN_PORT)) + server.serve_forever() + + + def _event_received(self, connection): + LOGGER.debug('Event received') + for message in connection: + message_json = json.loads(message) + + # Link creation + if 'link_id' in message_json: + LOGGER.debug('Link creation') + link = Link(**message_json) + + service = Service() + service.service_id.service_uuid.uuid = link.link_id.link_uuid.uuid + service.service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + service.service_type = ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY + service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_PLANNED + service_client.CreateService(service) + + a_device_uuid = device_get_uuid(link.link_endpoint_ids[0].device_id) + a_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[0])[2] + z_device_uuid = device_get_uuid(link.link_endpoint_ids[1].device_id) + z_endpoint_uuid = endpoint_get_uuid(link.link_endpoint_ids[1])[2] + + links = context_client.ListLinks(Empty()).links + for _link in links: + for _endpoint_id in _link.link_endpoint_ids: + if _endpoint_id.device_id.device_uuid.uuid == a_device_uuid and \ + _endpoint_id.endpoint_uuid.uuid == a_endpoint_uuid: + a_ep_id = _endpoint_id + elif _endpoint_id.device_id.device_uuid.uuid == z_device_uuid and \ + _endpoint_id.endpoint_uuid.uuid == z_endpoint_uuid: + z_ep_id = _endpoint_id + + if ('a_ep_id' not in locals()) or ('z_ep_id' not in locals()): + error_msg = f'Could not get VNT link endpoints\ + \n\ta_endpoint_uuid= {a_endpoint_uuid}\ + \n\tz_endpoint_uuid= {z_endpoint_uuid}' + LOGGER.error(error_msg) + connection.send(error_msg) + return + + service.service_endpoint_ids.append(copy.deepcopy(a_ep_id)) + service.service_endpoint_ids.append(copy.deepcopy(z_ep_id)) + + service_client.UpdateService(service) + re_svc = context_client.GetService(service.service_id) + connection.send(grpc_message_to_json_string(link)) + context_client.SetLink(link) + elif 'link_uuid' in message_json: + LOGGER.debug('Link removal') + link_id = LinkId(**message_json) + + service_id = ServiceId() + service_id.service_uuid.uuid = link_id.link_uuid.uuid + service_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + service_client.DeleteService(service_id) + connection.send(grpc_message_to_json_string(link_id)) + context_client.RemoveLink(link_id) + else: + LOGGER.debug('Topology received') + topology_details = TopologyDetails(**message_json) + + context = Context() + context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid + context_client.SetContext(context) + + topology = Topology() + topology.topology_id.context_id.CopyFrom(context.context_id) + topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid + context_client.SetTopology(topology) + + for device in topology_details.devices: + context_client.SetDevice(device) + + for link in topology_details.links: + context_client.SetLink(link) + + + +class E2EOrchestratorServiceServicerImpl(E2EOrchestratorServiceServicer): + def __init__(self): + LOGGER.debug('Creating Servicer...') + try: + LOGGER.debug('Requesting subscription') + sub_server = SubscriptionServer() + sub_server.start() + LOGGER.debug('Servicer Created') + self.retrieve_external_topologies() + except: + LOGGER.exception('Unhandled Exception') + + def retrieve_external_topologies(self): 
+ i = 1 + while True: + try: + ADD = str(get_setting(f'EXT_CONTROLLER{i}_ADD')) + PORT = int(get_setting(f'EXT_CONTROLLER{i}_PORT')) + except: # pylint: disable=bare-except + break + + try: + LOGGER.info('Retrieving external controller #{:d}'.format(i)) + url = 'http://{:s}:{:d}/tfs-api/context/{:s}/topology_details/{:s}'.format( + ADD, PORT, DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME + ) + LOGGER.info('url={:s}'.format(str(url))) + topo = requests.get(url).json() + LOGGER.info('Retrieved external controller #{:d}'.format(i)) + except: # pylint: disable=bare-except + LOGGER.exception('Exception retrieving topology from external controller #{:d}'.format(i)) + + topology_details = TopologyDetails(**topo) + context = Context() + context.context_id.context_uuid.uuid = topology_details.topology_id.context_id.context_uuid.uuid + context_client.SetContext(context) + + topology = Topology() + topology.topology_id.context_id.CopyFrom(context.context_id) + topology.topology_id.topology_uuid.uuid = topology_details.topology_id.topology_uuid.uuid + context_client.SetTopology(topology) + + for device in topology_details.devices: + context_client.SetDevice(device) + + for link in topology_details.links: + context_client.SetLink(link) + + i+=1 + + + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) + def Compute(self, request: E2EOrchestratorRequest, context: grpc.ServicerContext) -> E2EOrchestratorReply: + endpoints_ids = [] + for endpoint_id in request.service.service_endpoint_ids: + endpoints_ids.append(endpoint_get_uuid(endpoint_id)[2]) + + graph = networkx.Graph() + + devices = context_client.ListDevices(Empty()).devices + + for device in devices: + endpoints_uuids = [endpoint.endpoint_id.endpoint_uuid.uuid + for endpoint in device.device_endpoints] + for ep in endpoints_uuids: + graph.add_node(ep) + + for ep in endpoints_uuids: + for ep_i in endpoints_uuids: + if ep == ep_i: + continue + graph.add_edge(ep, ep_i) + + links = context_client.ListLinks(Empty()).links + for link in links: + eps = [] + for endpoint_id in link.link_endpoint_ids: + eps.append(endpoint_id.endpoint_uuid.uuid) + graph.add_edge(eps[0], eps[1]) + + + shortest = networkx.shortest_path(graph, endpoints_ids[0], endpoints_ids[1]) + + path = E2EOrchestratorReply() + path.services.append(copy.deepcopy(request.service)) + for i in range(0, int(len(shortest)/2)): + conn = Connection() + ep_a_uuid = str(shortest[i*2]) + ep_z_uuid = str(shortest[i*2+1]) + + conn.connection_id.connection_uuid.uuid = str(ep_a_uuid) + '_->_' + str(ep_z_uuid) + + ep_a_id = EndPointId() + ep_a_id.endpoint_uuid.uuid = ep_a_uuid + conn.path_hops_endpoint_ids.append(ep_a_id) + + ep_z_id = EndPointId() + ep_z_id.endpoint_uuid.uuid = ep_z_uuid + conn.path_hops_endpoint_ids.append(ep_z_id) + + path.connections.append(conn) + + return path diff --git a/src/e2e_orchestrator/service/subscriptions/ControllerDiscoverer.py b/src/e2e_orchestrator/service/subscriptions/ControllerDiscoverer.py new file mode 100644 index 0000000000000000000000000000000000000000..5d9efd531124782e687df1bf6d7f5f0779f1d74b --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/ControllerDiscoverer.py @@ -0,0 +1,92 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging, queue, threading +from typing import Any, Optional +from common.proto.context_pb2 import DeviceEvent, Empty +from common.tools.grpc.BaseEventCollector import BaseEventCollector +from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher +from common.tools.grpc.Tools import grpc_message_to_json_string +from context.client.ContextClient import ContextClient +from .Subscriptions import Subscriptions +from .TFSControllerSettings import get_tfs_controller_settings + + +LOGGER = logging.getLogger(__name__) + + +class EventDispatcher(BaseEventDispatcher): + def __init__( + self, events_queue : queue.PriorityQueue, + context_client : ContextClient, + subscriptions : Subscriptions, + terminate : Optional[threading.Event] = None + ) -> None: + super().__init__(events_queue, terminate) + self._context_client = context_client + self._subscriptions = subscriptions + + def dispatch_device_create(self, device_event : DeviceEvent) -> None: + MSG = 'Processing Device Create: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + tfs_ctrl_settings = get_tfs_controller_settings( + self._context_client, device_event + ) + if tfs_ctrl_settings is None: return + self._subscriptions.add_subscription(tfs_ctrl_settings) + + def dispatch_device_update(self, device_event : DeviceEvent) -> None: + MSG = 'Processing Device Update: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + tfs_ctrl_settings = get_tfs_controller_settings( + self._context_client, device_event + ) + if tfs_ctrl_settings is None: return + self._subscriptions.add_subscription(tfs_ctrl_settings) + + def dispatch_device_remove(self, device_event : DeviceEvent) -> None: + MSG = 'Processing Device Remove: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(device_event))) + device_uuid = device_event.device_id.device_uuid.uuid + self._subscriptions.remove_subscription(device_uuid) + + def dispatch(self, event : Any) -> None: + MSG = 'Unexpected Event: {:s}' + LOGGER.warning(MSG.format(grpc_message_to_json_string(event))) + +class ControllerDiscoverer: + def __init__( + self, subscriptions : Subscriptions, terminate : threading.Event + ) -> None: + self._context_client = ContextClient() + + self._event_collector = BaseEventCollector(terminate=terminate) + self._event_collector.install_collector( + self._context_client.GetDeviceEvents, Empty(), log_events_received=True + ) + self._event_dispatcher = EventDispatcher( + self._event_collector.get_events_queue(), self._context_client, subscriptions, + terminate=terminate + ) + + def start(self) -> None: + self._context_client.connect() + self._event_dispatcher.start() + self._event_collector.start() + + def stop(self): + self._event_collector.stop() + self._event_dispatcher.stop() + self._context_client.close() diff --git a/src/e2e_orchestrator/service/subscriptions/Subscription.py b/src/e2e_orchestrator/service/subscriptions/Subscription.py new file mode 100644 index 0000000000000000000000000000000000000000..a8b986858357749342e5f427cc949cee4ccc6e1e --- /dev/null +++ 
b/src/e2e_orchestrator/service/subscriptions/Subscription.py @@ -0,0 +1,59 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import socketio, threading +from common.Constants import ServiceNameEnum +from common.Settings import get_service_baseurl_http +from .dispatchers.Dispatchers import Dispatchers +from .TFSControllerSettings import TFSControllerSettings + + +NBI_SERVICE_PREFIX_URL = get_service_baseurl_http(ServiceNameEnum.NBI) or '' +CHILD_SOCKETIO_URL = 'http://{:s}:{:s}@{:s}:{:d}' + NBI_SERVICE_PREFIX_URL + + +class Subscription(threading.Thread): + def __init__( + self, tfs_ctrl_settings : TFSControllerSettings, dispatchers : Dispatchers, + terminate : threading.Event + ) -> None: + super().__init__(daemon=True) + self._settings = tfs_ctrl_settings + self._dispatchers = dispatchers + self._terminate = terminate + self._is_running = threading.Event() + + @property + def is_running(self): return self._is_running.is_set() + + def run(self) -> None: + child_socketio_url = CHILD_SOCKETIO_URL.format( + self._settings.nbi_username, + self._settings.nbi_password, + self._settings.nbi_address, + self._settings.nbi_port, + ) + + sio = socketio.Client(logger=True, engineio_logger=True) + self._dispatchers.register(sio) + sio.connect(child_socketio_url) + + while not self._terminate.is_set(): + sio.sleep(seconds=0.5) + + sio.shutdown() + + def stop(self): + self._terminate.set() diff --git a/src/e2e_orchestrator/service/subscriptions/Subscriptions.py b/src/e2e_orchestrator/service/subscriptions/Subscriptions.py new file mode 100644 index 0000000000000000000000000000000000000000..0ef8c010968997f3440b32fd99f3b4951e35598e --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/Subscriptions.py @@ -0,0 +1,44 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
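A minimal wiring sketch (not part of the patch) showing how a `Subscription` like the one above would be started for one discovered child controller, assuming the `TFSControllerSettings` and `Dispatchers` classes introduced elsewhere in this change; all connection values are illustrative:

```python
import threading
from .dispatchers.Dispatchers import Dispatchers
from .Subscription import Subscription
from .TFSControllerSettings import TFSControllerSettings

terminate = threading.Event()
dispatchers = Dispatchers(terminate)
settings = TFSControllerSettings(       # all field values illustrative
    device_uuid='child-tfs', device_type='teraflowsdn',
    nbi_address='127.0.0.1', nbi_port=8080,
    nbi_username='admin', nbi_password='admin',
)
subscription = Subscription(settings, dispatchers, terminate)
subscription.start()   # connects a Socket.IO client to the child controller's NBI
# ... on shutdown:
terminate.set()        # ends the polling loop; the client is then shut down
```
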
+ +import logging, threading +from typing import Dict +from .dispatchers.Dispatchers import Dispatchers +from .Subscription import Subscription +from .TFSControllerSettings import TFSControllerSettings + +LOGGER = logging.getLogger(__name__) + +class Subscriptions: + def __init__(self, dispatchers : Dispatchers, terminate : threading.Event) -> None: + self._dispatchers = dispatchers + self._terminate = terminate + self._lock = threading.Lock() + self._subscriptions : Dict[str, Subscription] = dict() + + def add_subscription(self, tfs_ctrl_settings : TFSControllerSettings) -> None: + device_uuid = tfs_ctrl_settings.device_uuid + with self._lock: + subscription = self._subscriptions.get(device_uuid) + if subscription is not None: return + subscription = Subscription(tfs_ctrl_settings, self._dispatchers, self._terminate) + self._subscriptions[device_uuid] = subscription + subscription.start() + + def remove_subscription(self, device_uuid : str) -> None: + with self._lock: + subscription = self._subscriptions.get(device_uuid) + if subscription is None: return + if subscription.is_running: subscription.stop() + self._subscriptions.pop(device_uuid, None) diff --git a/src/e2e_orchestrator/service/subscriptions/TFSControllerSettings.py b/src/e2e_orchestrator/service/subscriptions/TFSControllerSettings.py new file mode 100644 index 0000000000000000000000000000000000000000..00613e3d44fa5d25f0640b1e12521329956c0e7f --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/TFSControllerSettings.py @@ -0,0 +1,72 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
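For reference, a sketch of the `_connect/` config rules that `get_tfs_controller_settings()` in the next file expects on a discovered controller device; each entry corresponds to a `CONFIGACTION_SET` custom rule, and all values are illustrative:

```python
# '_connect/settings' carries a JSON blob with the credentials;
# the remaining keys map one connection attribute per rule.
example_connect_rules = {
    '_connect/address' : '10.0.0.10',
    '_connect/port'    : '8080',
    '_connect/settings': '{"username": "admin", "password": "admin"}',
}
```
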
+ + +import json +from dataclasses import dataclass +from typing import Optional +from common.DeviceTypes import DeviceTypeEnum +from common.proto.context_pb2 import ConfigActionEnum, DeviceEvent +from common.tools.context_queries.Device import get_device +from context.client.ContextClient import ContextClient + + +@dataclass +class TFSControllerSettings: + device_uuid : str + device_type : DeviceTypeEnum + nbi_address : str + nbi_port : int + nbi_username : str + nbi_password : str + + +SELECTED_DEVICE_TYPES = { + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value +} + + +def get_tfs_controller_settings( + context_client : ContextClient, device_event : DeviceEvent +) -> Optional[TFSControllerSettings]: + device_uuid = device_event.device_id.device_uuid.uuid + device = get_device( + context_client, device_uuid, rw_copy=False, + include_endpoints=False, include_config_rules=True, + include_components=False + ) + device_type = device.device_type + if device_type not in SELECTED_DEVICE_TYPES: return None + + connect_rules = dict() + for config_rule in device.device_config.config_rules: + if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue + if config_rule.WhichOneof('config_rule') != 'custom': continue + if not config_rule.custom.resource_key.startswith('_connect/'): continue + connect_attribute = config_rule.custom.resource_key.replace('_connect/', '') + if connect_attribute == 'settings': + settings = json.loads(config_rule.custom.resource_value) + for field in ['username', 'password']: + connect_rules[field] = settings[field] + else: + connect_rules[connect_attribute] = config_rule.custom.resource_value + + return TFSControllerSettings( + device_uuid = device_uuid, + device_type = device_type, + nbi_address = str(connect_rules['address' ]), + nbi_port = int(connect_rules['port' ]), + nbi_username = str(connect_rules['username']), + nbi_password = str(connect_rules['password']), + ) diff --git a/src/e2e_orchestrator/service/subscriptions/__init__.py b/src/e2e_orchestrator/service/subscriptions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/Dispatchers.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/Dispatchers.py new file mode 100644 index 0000000000000000000000000000000000000000..88345e32a67dfec58adafdd2c90d76f99c0d2d65 --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/Dispatchers.py @@ -0,0 +1,33 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, socketio, threading +from typing import List, Type +from ._Dispatcher import _Dispatcher + +LOGGER = logging.getLogger(__name__) + +class Dispatchers: + def __init__(self, terminate : threading.Event) -> None: + self._terminate = terminate + self._dispatchers : List[_Dispatcher] = list() + + def add_dispatcher(self, dispatcher_class : Type[_Dispatcher]) -> None: + dispatcher = dispatcher_class(self._terminate) + self._dispatchers.append(dispatcher) + dispatcher.start() + + def register(self, sio_client : socketio.Client) -> None: + for dispatcher in self._dispatchers: + dispatcher.register(sio_client) diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/_Dispatcher.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/_Dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..d2cd40bbd8c79758da08dac59620bbe93ed5e39b --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/_Dispatcher.py @@ -0,0 +1,46 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
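A minimal subclass sketch (not part of the patch) illustrating the `_Dispatcher` contract defined in the file below: `register()` attaches a Socket.IO namespace, and `process_request()` produces the value that resolves the `Future` paired with each queued request; all names are illustrative:

```python
import socketio, threading
from typing import Any
# from ._Dispatcher import _Dispatcher  # the base class defined just below

class EchoDispatcher(_Dispatcher):
    def register(self, sio_client : socketio.Client) -> None:
        pass  # a real dispatcher registers a socketio ClientNamespace here

    def process_request(self, request : Any) -> Any:
        return {'echo': request}  # becomes the result of the request's paired Future

# terminate = threading.Event(); EchoDispatcher(terminate).start()
```
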
+ +import queue, socketio, threading +from concurrent.futures import Future +from typing import Any, Tuple + +class _Dispatcher(threading.Thread): + def __init__(self, terminate : threading.Event): + super().__init__(daemon=True) + self._dispatcher_queue = queue.Queue[Tuple[Any, Future]]() + self._terminate = terminate + + @property + def dispatcher_queue(self): return self._dispatcher_queue + + def register(self, sio_client : socketio.Client) -> None: + raise NotImplementedError('To be implemented in subclass') + + def run(self): + while not self._terminate.is_set(): + try: + request,future = self._dispatcher_queue.get(block=True, timeout=1.0) + except queue.Empty: + continue + + try: + result = self.process_request(request) + except Exception as e: + future.set_exception(e) + else: + future.set_result(result) + + def process_request(self, request : Any) -> Any: + raise NotImplementedError('To be implemented in subclass') diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/__init__.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/ClientNamespace.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/ClientNamespace.py new file mode 100644 index 0000000000000000000000000000000000000000..b20b5a5f00c5813966fbc813bfd5514fdd00ad8f --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/ClientNamespace.py @@ -0,0 +1,98 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json, logging, queue, socketio +from concurrent.futures import Future +from typing import Tuple +from .Constants import SIO_NAMESPACE +from .Recommendation import Recommendation, RecommendationAction + +LOGGER = logging.getLogger(__name__) + +class ClientNamespace(socketio.ClientNamespace): + def __init__(self, dispatcher_queue : queue.Queue[Tuple[Recommendation, Future]]): + self._dispatcher_queue = dispatcher_queue + super().__init__(namespace=SIO_NAMESPACE) + + def on_connect(self): + LOGGER.info('[on_connect] Connected') + + def on_disconnect(self, reason): + MSG = '[on_disconnect] Disconnected, reason: {:s}' + LOGGER.info(MSG.format(str(reason))) + + def on_vlink_create(self, data): + MSG = '[on_vlink_create] begin data={:s}' + LOGGER.info(MSG.format(str(data))) + + json_data = json.loads(data) + request_key = json_data.pop('_request_key') + + recommendation = Recommendation( + action = RecommendationAction.VLINK_CREATE, + data = json_data, + ) + result = Future() + + MSG = '[on_vlink_create] Recommendation ({:s}): {:s}' + LOGGER.info(MSG.format(str(request_key), str(recommendation))) + + LOGGER.debug('[on_vlink_create] Queuing recommendation...') + self._dispatcher_queue.put_nowait((recommendation, result)) + + reply = dict() + reply['_request_key'] = request_key + try: + reply['result'] = result.result() + event = reply['result'].pop('event') + except Exception as e: + reply['error'] = str(e) + #reply['stacktrace'] = str(e) + event = 'error' + + LOGGER.debug('[on_vlink_create] Replying...') + self.emit(event, json.dumps(reply)) + LOGGER.debug('[on_vlink_create] end') + + def on_vlink_remove(self, data): + MSG = '[on_vlink_remove] begin data={:s}' + LOGGER.info(MSG.format(str(data))) + + json_data = json.loads(data) + request_key = json_data.pop('_request_key') + + recommendation = Recommendation( + action = RecommendationAction.VLINK_REMOVE, + data = json_data, + ) + result = Future() + + MSG = '[on_vlink_remove] Recommendation ({:s}): {:s}' + LOGGER.info(MSG.format(str(request_key), str(recommendation))) + + LOGGER.debug('[on_vlink_remove] Queuing recommendation...') + self._dispatcher_queue.put_nowait((recommendation, result)) + + reply = dict() + reply['_request_key'] = request_key + try: + reply['result'] = result.result() + event = reply['result'].pop('event') + except Exception as e: + reply['error'] = str(e) + #reply['stacktrace'] = str(e) + event = 'error' + + LOGGER.debug('[on_vlink_remove] Replying...') + self.emit(event, json.dumps(reply)) + LOGGER.debug('[on_vlink_remove] end') diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Constants.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Constants.py new file mode 100644 index 0000000000000000000000000000000000000000..da3af24fd6996531422d7a8447e5edb6fcfb984c --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Constants.py @@ -0,0 +1,15 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
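A sketch (not part of the patch) of how the `RecommendationDispatcher` defined further below plugs into a Socket.IO client on the `/recommendations` namespace; the connection URL is illustrative, following the format built by `Subscription`:

```python
import socketio, threading
from .Dispatcher import RecommendationDispatcher  # defined below

terminate = threading.Event()
dispatcher = RecommendationDispatcher(terminate)
dispatcher.start()                # consumes queued recommendations

sio = socketio.Client()
dispatcher.register(sio)          # attaches ClientNamespace on SIO_NAMESPACE
# sio.connect('http://admin:admin@child-nbi:8080')  # illustrative URL
```
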
+ +SIO_NAMESPACE = '/recommendations' diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Dispatcher.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..6913014ad1ae6cef9a954eb27f2e2323c50c8aca --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Dispatcher.py @@ -0,0 +1,67 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy, logging, socketio +from typing import Dict +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import Service, ServiceId +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from service.client.ServiceClient import ServiceClient +from .._Dispatcher import _Dispatcher +from .ClientNamespace import ClientNamespace +from .Recommendation import Recommendation, RecommendationAction +from .Tools import compose_optical_service + +LOGGER = logging.getLogger(__name__) + +class RecommendationDispatcher(_Dispatcher): + + def register(self, sio_client : socketio.Client) -> None: + sio_client.register_namespace(ClientNamespace(self.dispatcher_queue)) + + def process_request(self, request : Recommendation) -> Dict: + LOGGER.info('[process_request] request={:s}'.format(str(request))) + + if request.action == RecommendationAction.VLINK_CREATE: + vlink_optical_service = compose_optical_service(request.data) + vlink_optical_service_add = copy.deepcopy(vlink_optical_service) + vlink_optical_service_add.pop('service_endpoint_ids', None) + vlink_optical_service_add.pop('service_constraints', None) + vlink_optical_service_add.pop('service_config', None) + + service_client = ServiceClient() + service_id = service_client.CreateService(Service(**vlink_optical_service_add)) + service_uuid = service_id.service_uuid.uuid + vlink_optical_service['service_id']['service_uuid']['uuid'] = service_uuid + service_id = service_client.UpdateService(Service(**vlink_optical_service)) + + result = {'event': 'vlink_created', 'vlink_uuid': service_uuid} + elif request.action == RecommendationAction.VLINK_REMOVE: + vlink_service_uuid = request.data['link_uuid']['uuid'] + context_id = json_context_id(DEFAULT_CONTEXT_NAME) + vlink_optical_service_id = json_service_id(vlink_service_uuid, context_id=context_id) + + service_client = ServiceClient() + service_id = service_client.DeleteService(ServiceId(**vlink_optical_service_id)) + + if vlink_service_uuid == 'IP1/PORT-xe1==IP2/PORT-xe1': + service_id = service_client.DeleteService(ServiceId(**vlink_optical_service_id)) + + result = {'event': 'vlink_removed'} + else: + MSG = 'RecommendationAction not supported in Recommendation({:s})' + raise NotImplementedError(MSG.format(str(request))) + + return result diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Recommendation.py 
b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Recommendation.py new file mode 100644 index 0000000000000000000000000000000000000000..ca03b193f82b1447d1ef2e7b4ef39a9284249658 --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Recommendation.py @@ -0,0 +1,27 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from dataclasses import dataclass, field +from enum import Enum +from typing import Dict + +class RecommendationAction(Enum): + VLINK_CREATE = 'vlink-create' + VLINK_REMOVE = 'vlink-remove' + +@dataclass +class Recommendation: + action : RecommendationAction + data : Dict = field(default_factory=dict) diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..12b2069cf8cf2feb6d5a3981ab68ca4fd164ec36 --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/Tools.py @@ -0,0 +1,167 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
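The core idea of `compose_graph_from_topology()` in the file below, in miniature: endpoint UUIDs are graph nodes, every pair of endpoints on one device is bridged with an edge, and each topology link adds an edge between its two endpoint UUIDs; a sketch with illustrative names:

```python
import networkx

graph = networkx.Graph()
dev_a_eps = ['A:ep1', 'A:ep2']; dev_b_eps = ['B:ep1', 'B:ep2']
for eps in (dev_a_eps, dev_b_eps):
    graph.add_nodes_from(eps)
    for i in eps:
        for j in eps:
            if i != j: graph.add_edge(i, j)   # intra-device full mesh
graph.add_edge('A:ep2', 'B:ep1')              # inter-device link
print(networkx.shortest_path(graph, 'A:ep1', 'B:ep2'))
# ['A:ep1', 'A:ep2', 'B:ep1', 'B:ep2']
```
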
+ +import logging, networkx +from dataclasses import dataclass, field +from typing import Dict, List, Set, Tuple +from common.proto.context_pb2 import ServiceTypeEnum +from common.tools.context_queries.Topology import get_topology_details +from common.tools.object_factory.Constraint import json_constraint_custom +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id +from common.tools.object_factory.Service import json_service +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.DeviceTypes import DeviceTypeEnum +from context.client.ContextClient import ContextClient + + +LOGGER = logging.getLogger(__name__) + + +@dataclass +class GraphAndMapping: + graph : networkx.Graph = field(default_factory=networkx.Graph) + device_to_type : Dict[str, str] = field(default_factory=dict) + device_name_to_uuid : Dict[str, str] = field(default_factory=dict) + endpoint_name_to_uuid : Dict[Tuple[str, str], str] = field(default_factory=dict) + endpoint_to_device_uuid : Dict[str, str] = field(default_factory=dict) + + +EXCLUDED_DEVICE_TYPES : Set[str] = { + DeviceTypeEnum.EMULATED_IP_SDN_CONTROLLER.value, + DeviceTypeEnum.EMULATED_MICROWAVE_RADIO_SYSTEM.value, + DeviceTypeEnum.EMULATED_OPEN_LINE_SYSTEM.value, + DeviceTypeEnum.EMULATED_XR_CONSTELLATION.value, + DeviceTypeEnum.IETF_SLICE.value, + DeviceTypeEnum.IP_SDN_CONTROLLER.value, + DeviceTypeEnum.MICROWAVE_RADIO_SYSTEM.value, + DeviceTypeEnum.NCE.value, + DeviceTypeEnum.OPEN_LINE_SYSTEM.value, + DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value, + DeviceTypeEnum.XR_CONSTELLATION.value, +} + + +def compose_graph_from_topology() -> GraphAndMapping: + context_client = ContextClient() + topology_details = get_topology_details( + context_client, DEFAULT_TOPOLOGY_NAME, + context_uuid=DEFAULT_CONTEXT_NAME, rw_copy=False + ) + + graph_and_mapping = GraphAndMapping() + + excluded_device_uuids : Set[str] = set() + + for device in topology_details.devices: + device_uuid = device.device_id.device_uuid.uuid + graph_and_mapping.device_name_to_uuid.setdefault(device.name, device_uuid) + graph_and_mapping.device_name_to_uuid.setdefault(device_uuid, device_uuid) + graph_and_mapping.device_to_type.setdefault(device_uuid, device.device_type) + + if device.device_type in EXCLUDED_DEVICE_TYPES: + excluded_device_uuids.add(device_uuid) + continue + + endpoint_uuids = list() + for endpoint in device.device_endpoints: + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_uuids.append(endpoint_uuid) + graph_and_mapping.graph.add_node(endpoint_uuid) + + graph_and_mapping.endpoint_name_to_uuid.setdefault((device_uuid, endpoint.name), endpoint_uuid) + graph_and_mapping.endpoint_name_to_uuid.setdefault((device_uuid, endpoint_uuid), endpoint_uuid) + graph_and_mapping.endpoint_to_device_uuid.setdefault(endpoint_uuid, device_uuid) + + for endpoint_uuid_i in endpoint_uuids: + for endpoint_uuid_j in endpoint_uuids: + if endpoint_uuid_i == endpoint_uuid_j: continue + graph_and_mapping.graph.add_edge(endpoint_uuid_i, endpoint_uuid_j) + + for link in topology_details.links: + endpoint_id_a = link.link_endpoint_ids[ 0] + endpoint_id_z = link.link_endpoint_ids[-1] + + device_uuid_a = endpoint_id_a.device_id.device_uuid.uuid + if device_uuid_a in excluded_device_uuids: continue + + device_uuid_z = endpoint_id_z.device_id.device_uuid.uuid + if device_uuid_z in excluded_device_uuids: continue + + 
graph_and_mapping.graph.add_edge( + endpoint_id_a.endpoint_uuid.uuid, + endpoint_id_z.endpoint_uuid.uuid, + ) + + return graph_and_mapping + +def compose_optical_service(vlink_request : Dict) -> Dict: + graph_and_mapping = compose_graph_from_topology() + + vlink_endpoint_id_a = vlink_request['link_endpoint_ids'][ 0] + vlink_endpoint_id_b = vlink_request['link_endpoint_ids'][-1] + + device_uuid_or_name_a = vlink_endpoint_id_a['device_id']['device_uuid']['uuid'] + device_uuid_or_name_b = vlink_endpoint_id_b['device_id']['device_uuid']['uuid'] + endpoint_uuid_or_name_a = vlink_endpoint_id_a['endpoint_uuid']['uuid'] + endpoint_uuid_or_name_b = vlink_endpoint_id_b['endpoint_uuid']['uuid'] + + device_uuid_a = graph_and_mapping.device_name_to_uuid[device_uuid_or_name_a] + device_uuid_b = graph_and_mapping.device_name_to_uuid[device_uuid_or_name_b] + + endpoint_uuid_a = graph_and_mapping.endpoint_name_to_uuid[(device_uuid_a, endpoint_uuid_or_name_a)] + endpoint_uuid_b = graph_and_mapping.endpoint_name_to_uuid[(device_uuid_b, endpoint_uuid_or_name_b)] + + path_hops = networkx.shortest_path( + graph_and_mapping.graph, endpoint_uuid_a, endpoint_uuid_b + ) + + LOGGER.info('[compose_optical_service] path_hops={:s}'.format(str(path_hops))) + + optical_border_endpoint_ids : List[Dict] = list() + for endpoint_uuid in path_hops: + LOGGER.info('[compose_optical_service] endpoint_uuid={:s}'.format(str(endpoint_uuid))) + device_uuid = graph_and_mapping.endpoint_to_device_uuid[endpoint_uuid] + LOGGER.info('[compose_optical_service] device_uuid={:s}'.format(str(device_uuid))) + device_type = graph_and_mapping.device_to_type[device_uuid] + LOGGER.info('[compose_optical_service] device_type={:s}'.format(str(device_type))) + if device_type != DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value: continue + device_id = json_device_id(device_uuid) + endpoint_id = json_endpoint_id(device_id, endpoint_uuid) + LOGGER.info('[compose_optical_service] endpoint_id={:s}'.format(str(endpoint_id))) + optical_border_endpoint_ids.append(endpoint_id) + + LOGGER.info('[compose_optical_service] optical_border_endpoint_ids={:s}'.format(str(optical_border_endpoint_ids))) + + constraints = [ + json_constraint_custom('bandwidth[gbps]', str(vlink_request['attributes']['total_capacity_gbps'])), + json_constraint_custom('bidirectionality', '1'), + ] + + vlink_service_uuid = vlink_request['link_id']['link_uuid']['uuid'] + + if vlink_service_uuid == 'IP1/PORT-xe1==IP2/PORT-xe1': + constraints.append(json_constraint_custom('optical-band-width[GHz]', '300')) + + vlink_optical_service = json_service( + vlink_service_uuid, + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, + context_id=json_context_id(DEFAULT_CONTEXT_NAME), + endpoint_ids=[ + optical_border_endpoint_ids[0], optical_border_endpoint_ids[-1] + ], + constraints=constraints, + ) + return vlink_optical_service diff --git a/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/__init__.py b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..023830645e0fcb60e3f8583674a954810af222f2 --- /dev/null +++ b/src/e2e_orchestrator/service/subscriptions/dispatchers/recommendation/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/src/forecaster/service/__main__.py b/src/forecaster/service/__main__.py index cf5e881109529560d47d5e1b0ac30333d760e32f..54f3331fab21eecfe926226b189b785afa9a98e9 100644 --- a/src/forecaster/service/__main__.py +++ b/src/forecaster/service/__main__.py @@ -16,27 +16,28 @@ import logging, signal, sys, threading from prometheus_client import start_http_server from common.Constants import ServiceNameEnum from common.Settings import ( - ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, - wait_for_environment_variables) + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, + get_log_level, get_metrics_port, wait_for_environment_variables +) from .ForecasterService import ForecasterService -terminate = threading.Event() -LOGGER : logging.Logger = None + +TERMINATE = threading.Event() + +LOG_LEVEL = get_log_level() +logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") +logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) +logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) +logging.getLogger('monitoring-client').setLevel(logging.WARNING) +LOGGER = logging.getLogger(__name__) + def signal_handler(signal, frame): # pylint: disable=redefined-outer-name LOGGER.warning('Terminate signal received') - terminate.set() + TERMINATE.set() -def main(): - global LOGGER # pylint: disable=global-statement - - log_level = get_log_level() - logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s") - logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING) - logging.getLogger('apscheduler.scheduler').setLevel(logging.WARNING) - logging.getLogger('monitoring-client').setLevel(logging.WARNING) - LOGGER = logging.getLogger(__name__) +def main(): wait_for_environment_variables([ get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), @@ -53,12 +54,13 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) - # Starting Forecaster service + # Starting service grpc_service = ForecasterService() grpc_service.start() + LOGGER.info('Running...') # Wait for Ctrl+C or termination signal - while not terminate.wait(timeout=1.0): pass + while not TERMINATE.wait(timeout=1.0): pass LOGGER.info('Terminating...') grpc_service.stop() @@ -66,5 +68,6 @@ def main(): LOGGER.info('Bye') return 0 + if __name__ == '__main__': sys.exit(main()) diff --git a/src/nbi/Dockerfile b/src/nbi/Dockerfile index c56dff12b978ddf21d053242d96dd663af72e686..ac54fc851f22672f86bbaaa9e94e8cf534ad30cb 100644 --- a/src/nbi/Dockerfile +++ b/src/nbi/Dockerfile @@ -89,5 +89,5 @@ RUN mkdir -p /var/teraflow/tests/tools COPY src/tests/tools/mock_osm/. 
tests/tools/mock_osm/ # Start the service -ENTRYPOINT ["gunicorn", "-w", "4", "--worker-class", "eventlet", "-b", "0.0.0.0:8080", "nbi.service.app:app"] -#ENTRYPOINT ["gunicorn", "-w", "4", "--worker-class", "geventwebsocket.gunicorn.workers.GeventWebSocketWorker", "-b", "0.0.0.0:8080", "nbi.service.app:app"] +# NOTE: Configured a single worker to prevent issues with multi-worker synchronization. To be investigated. +ENTRYPOINT ["gunicorn", "--workers", "1", "--worker-class", "eventlet", "--bind", "0.0.0.0:8080", "nbi.service.app:app"] diff --git a/src/nbi/README.md b/src/nbi/README.md index f997ce21c9b809a1749f046672e895d3ad466824..5dc5be29b9cd5f30c8783c5242fad35b47215d7b 100644 --- a/src/nbi/README.md +++ b/src/nbi/README.md @@ -2,6 +2,15 @@ The NBI component uses libyang to validate and process messages. Follow instructions below to install it. + +## IMPORTANT +**TL;DR**: Use `kafka-python` for consuming from Kafka in the NBI component. + +Why: + +`confluent-kafka` is implemented in C, so it bypasses the eventlet monkey patches that convert normal threads into green threads. +As a result, methods such as `consumer.poll()` become blocking under the eventlet worker model used by the gunicorn web server (see the consumer sketch further below). + ## Install libyang - Ref: https://github.com/CESNET/libyang - Ref: https://github.com/CESNET/libyang-python/ diff --git a/src/nbi/requirements.in b/src/nbi/requirements.in index 401a6de3026f4ab5896f21224e7674435553e080..e21aee4f699991f02ab535ac7d6216ac50dc3977 100644 --- a/src/nbi/requirements.in +++ b/src/nbi/requirements.in @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +confluent-kafka==2.3.* # only for creating topics and compatibility deepdiff==6.7.* deepmerge==1.1.* eventlet==0.39.0 @@ -19,16 +20,18 @@ Flask==2.1.3 Flask-HTTPAuth==4.5.0 Flask-RESTful==0.3.9 flask-socketio==5.5.1 -jsonschema==4.4.0 #gevent==24.11.1 #gevent-websocket==0.10.1 #greenlet==3.1.1 gunicorn==23.0.0 +jsonschema==4.4.0 +kafka-python==2.0.6 # for publishing and consuming messages in an eventlet-compatible way libyang==2.8.4 netaddr==0.9.0 pyang==2.6.0 git+https://github.com/robshakir/pyangbind.git pydantic==2.6.3 +python-socketio==5.12.1 requests==2.27.1 werkzeug==2.3.7 #websockets==12.0 diff --git a/src/nbi/service/NbiApplication.py b/src/nbi/service/NbiApplication.py index 2216177ff5859b006e62e588161dcb33b7f19c18..16c1a6a1a53515d365abfd5748a5b479b5c73238 100644 --- a/src/nbi/service/NbiApplication.py +++ b/src/nbi/service/NbiApplication.py @@ -18,6 +18,7 @@ from typing import Any, List, Optional, Tuple from flask import Flask, request from flask_restful import Api, Resource from flask_socketio import Namespace, SocketIO +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic from nbi.Config import SECRET_KEY @@ -40,10 +41,16 @@ class NbiApplication: self._app.config['SECRET_KEY'] = SECRET_KEY self._app.after_request(log_request) self._api = Api(self._app, prefix=base_url) - #socketio_path = '/'.join([base_url.rstrip('/'), 'socket.io']) + + # Configure KafkaManager to enable SocketIO Servers running in different + # gunicorn workers to self-coordinate and share sessions. 
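In the spirit of the README note above, a minimal `kafka-python` consumer sketch that stays cooperative under eventlet; topic name and bootstrap address are illustrative:

```python
from kafka import KafkaConsumer  # kafka-python is pure Python, so eventlet's
                                 # monkey-patched sockets keep it cooperative

consumer = KafkaConsumer(
    'nbi-example-topic',                     # illustrative topic name
    bootstrap_servers='kafka-service:9092',  # illustrative broker address
    consumer_timeout_ms=1000,                # end iteration after 1s without messages
)
for message in consumer:
    print(message.topic, message.value)
```
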
+ #self._sio_client_manager = socketio.KafkaManager( + # url='kafka://{:s}'.format(KafkaConfig.get_kafka_address()), + # channel=KafkaTopic.NBI_SOCKETIO_WORKERS.value + #) self._sio = SocketIO( self._app, cors_allowed_origins='*', async_mode='eventlet', - #path=socketio_path, + #client_manager=self._sio_client_manager, logger=True, engineio_logger=True ) diff --git a/src/nbi/service/app.py b/src/nbi/service/app.py index a413c6db81869149157d75a5954986ed26c5b699..99f66a94cbe85983d6de9cb9247f0a551d5a8da3 100644 --- a/src/nbi/service/app.py +++ b/src/nbi/service/app.py @@ -20,6 +20,7 @@ eventlet.monkey_patch() #pylint: disable=wrong-import-position import logging +from common.tools.kafka.Variables import KafkaTopic from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, @@ -39,14 +40,32 @@ from .ietf_network_slice import register_ietf_nss from .qkd_app import register_qkd_app from .restconf_root import register_restconf_root from .tfs_api import register_tfs_api +#from .topology_updates import register_topology_updates +from .vntm_recommend import register_vntm_recommend from .well_known_meta import register_well_known LOG_LEVEL = get_log_level() -logging.basicConfig(level=LOG_LEVEL) +logging.basicConfig( + level=LOG_LEVEL, + format="[Worker-%(process)d][%(asctime)s] %(levelname)s:%(name)s:%(message)s", +) +logging.getLogger('kafka.client').setLevel(logging.WARNING) +logging.getLogger('kafka.cluster').setLevel(logging.WARNING) +logging.getLogger('kafka.conn').setLevel(logging.WARNING) +logging.getLogger('kafka.consumer.fetcher').setLevel(logging.WARNING) +logging.getLogger('kafka.consumer.group').setLevel(logging.WARNING) +logging.getLogger('kafka.consumer.subscription_state').setLevel(logging.WARNING) +logging.getLogger('kafka.metrics.metrics').setLevel(logging.WARNING) +logging.getLogger('kafka.producer.kafka').setLevel(logging.WARNING) +logging.getLogger('kafka.producer.record_accumulator').setLevel(logging.WARNING) +logging.getLogger('kafka.producer.sender').setLevel(logging.WARNING) +logging.getLogger('kafka.protocol.parser').setLevel(logging.WARNING) logging.getLogger('socketio.server').setLevel(logging.WARNING) LOGGER = logging.getLogger(__name__) +LOGGER.info('Starting...') + wait_for_environment_variables([ get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), @@ -58,23 +77,32 @@ wait_for_environment_variables([ BASE_URL = get_service_baseurl_http(ServiceNameEnum.NBI) or '' +LOGGER.info('Creating missing Kafka topics...') +KafkaTopic.create_all_topics() +LOGGER.info('Created required Kafka topics') + nbi_app = NbiApplication(base_url=BASE_URL) -register_health_probes(nbi_app) -register_restconf_root(nbi_app) -register_well_known (nbi_app) -register_tfs_api (nbi_app) -register_etsi_bwm_api (nbi_app) -register_ietf_hardware(nbi_app) -register_ietf_l2vpn (nbi_app) -register_ietf_l3vpn (nbi_app) -register_ietf_network (nbi_app) -register_ietf_nss (nbi_app) -register_ietf_acl (nbi_app) -register_qkd_app (nbi_app) +register_health_probes (nbi_app) +register_restconf_root (nbi_app) +register_well_known (nbi_app) +register_tfs_api (nbi_app) +register_etsi_bwm_api (nbi_app) +register_ietf_hardware (nbi_app) +register_ietf_l2vpn (nbi_app) +register_ietf_l3vpn (nbi_app) +register_ietf_network (nbi_app) +register_ietf_nss (nbi_app) +register_ietf_acl (nbi_app) +register_qkd_app (nbi_app) +#register_topology_updates(nbi_app) # 
does not work; check whether eventlet-gRPC side effects are the cause +register_vntm_recommend (nbi_app) +LOGGER.info('All connectors registered') nbi_app.dump_configuration() app = nbi_app.get_flask_app() +LOGGER.info('Initialization completed!') + if __name__ == '__main__': # Only used to run it locally during development stage; # otherwise, app is directly launched by gunicorn. diff --git a/src/nbi/service/context_subscription/__init__.py b/src/nbi/service/context_subscription/__init__.py deleted file mode 100644 index 1e88a3cd10afd7ee50130bac1e0be42a795d16ac..0000000000000000000000000000000000000000 --- a/src/nbi/service/context_subscription/__init__.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from websockets.sync.server import serve -from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest -from common.Settings import get_setting -from context.client.ContextClient import ContextClient -from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME -from common.tools.object_factory.Topology import json_topology_id -from common.tools.object_factory.Context import json_context_id -from common.proto.context_pb2 import ContextId, TopologyId -import json -import os -from vnt_manager.client.VNTManagerClient import VNTManagerClient - -JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) -ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID) -ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID)) - -vnt_manager_client: VNTManagerClient = VNTManagerClient() -context_client: ContextClient = ContextClient() - -ALL_HOSTS = "0.0.0.0" -WS_IP_PORT = int(get_setting('WS_IP_PORT', default='8761')) - -LOGGER = logging.getLogger(__name__) - - -def register_context_subscription(): - with serve(subcript_to_vnt_manager, ALL_HOSTS, WS_IP_PORT, logger=LOGGER) as server: - LOGGER.info("Running subscription server...: {}:{}".format(ALL_HOSTS, str(WS_IP_PORT))) - server.serve_forever() - LOGGER.info("Exiting subscription server...") - - -def subcript_to_vnt_manager(websocket): - for message in websocket: - LOGGER.debug("Message received: {}".format(message)) - message_json = json.loads(message) - request = VNTSubscriptionRequest() - request.host = message_json['host'] - request.port = message_json['port'] - LOGGER.debug("Received gRPC from ws: {}".format(request)) - - try: - vntm_reply = vnt_manager_client.VNTSubscript(request) - LOGGER.debug("Received gRPC from vntm: {}".format(vntm_reply)) - except Exception as e: - LOGGER.error('Could not subscript to VTNManager: {}'.format(e)) - - websocket.send(vntm_reply.subscription) diff --git a/src/nbi/service/health_probes/HeartbeatThread.py b/src/nbi/service/health_probes/HeartbeatThread.py index c49f4ab49fdd1d09f5747ba9680b79e22299bc11..67da0b55e2d02f429b1c16dace60c910b64e1443 100644 --- a/src/nbi/service/health_probes/HeartbeatThread.py +++ 
b/src/nbi/service/health_probes/HeartbeatThread.py @@ -31,20 +31,14 @@ class HeartbeatThread(threading.Thread): self._terminate.set() def run(self): - LOGGER.debug('[HeartbeatThread::run] begin') try: + LOGGER.info('[run] Running...') while not self._terminate.is_set(): - LOGGER.debug('[HeartbeatThread::run] Running...') time.sleep(HEARTHBEAT_INTERVAL) - LOGGER.debug('[HeartbeatThread::run] Interval elapsed') - server : socketio.Server = self._namespace.server if server is None: continue - - LOGGER.debug('[HeartbeatThread::run] emitting...') data = {'uptime_seconds': time.time() - START_TIME} server.emit('uptime', data, namespace=SIO_NAMESPACE, to=SIO_ROOM) - LOGGER.debug('[HeartbeatThread::run] emitted') except: # pylint: disable=bare-except - LOGGER.exception('[HeartbeatThread::run] thread failed') - LOGGER.debug('[HeartbeatThread::run] end') + LOGGER.exception('[run] Unexpected Thread Exception') + LOGGER.info('[run] Terminated') diff --git a/src/nbi/service/health_probes/Namespaces.py b/src/nbi/service/health_probes/Namespaces.py index 9f7517d9b2a634cf21aaa73eed2b49d44dbd507b..5f6a5205f938c0867c52f7e3b0006e3836069051 100644 --- a/src/nbi/service/health_probes/Namespaces.py +++ b/src/nbi/service/health_probes/Namespaces.py @@ -24,17 +24,17 @@ class HeartbeatServerNamespace(Namespace): def __init__(self): super().__init__(namespace=SIO_NAMESPACE) self._thread = HeartbeatThread(self) - self._thread.start() + #self._thread.start() def stop_thread(self) -> None: self._thread.stop() def on_connect(self, auth): - MSG = '[HeartbeatServerNamespace::on_connect] Client connect: sid={:s}, auth={:s}' + MSG = '[on_connect] Client connect: sid={:s}, auth={:s}' LOGGER.info(MSG.format(str(request.sid), str(auth))) join_room(SIO_ROOM, namespace=SIO_NAMESPACE) def on_disconnect(self, reason): - MSG = '[HeartbeatServerNamespace::on_disconnect] Client disconnect: sid={:s}, reason={:s}' + MSG = '[on_disconnect] Client disconnect: sid={:s}, reason={:s}' LOGGER.info(MSG.format(str(request.sid), str(reason))) leave_room(SIO_ROOM, namespace=SIO_NAMESPACE) diff --git a/src/nbi/service/tfs_api/Resources.py b/src/nbi/service/tfs_api/Resources.py index ab608f2d746a2faffc819f25cc026ce9b949aff7..d634a0a59f68e898adb29d148f72c6f4adebe689 100644 --- a/src/nbi/service/tfs_api/Resources.py +++ b/src/nbi/service/tfs_api/Resources.py @@ -14,6 +14,7 @@ import json import logging +from typing import Dict, List from flask.json import jsonify from flask_restful import Resource, request from werkzeug.exceptions import BadRequest @@ -55,10 +56,10 @@ class Contexts(_Resource): json_requests = request.get_json() if 'contexts' in json_requests: json_requests = json_requests['contexts'] - return [ - format_grpc_to_json(self.context_client.SetContext(grpc_context(context))) + return jsonify([ + grpc_message_to_json(self.context_client.SetContext(grpc_context(context))) for context in json_requests - ] + ]) class Context(_Resource): def get(self, context_uuid : str): @@ -157,10 +158,10 @@ class Topologies(_Resource): for topology in json_requests: if context_uuid != topology['topology_id']['context_id']['context_uuid']['uuid']: raise BadRequest('Mismatching context_uuid') - return [ - format_grpc_to_json(self.context_client.SetTopology(grpc_topology(**topology))) + return jsonify([ + grpc_message_to_json(self.context_client.SetTopology(grpc_topology(topology))) for topology in json_requests - ] + ]) class Topology(_Resource): def get(self, context_uuid : str, topology_uuid : str): @@ -194,13 +195,15 @@ class Services(_Resource): 
json_requests = request.get_json() if 'services' in json_requests: json_requests = json_requests['services'] + if isinstance(json_requests, dict): + json_requests = [json_requests] for service in json_requests: if context_uuid != service['service_id']['context_id']['context_uuid']['uuid']: raise BadRequest('Mismatching context_uuid') - return [ - format_grpc_to_json(self.service_client.CreateService(grpc_service(**service))) + return jsonify([ + grpc_message_to_json(self.service_client.CreateService(grpc_service(service))) for service in json_requests - ] + ]) class Service(_Resource): def get(self, context_uuid : str, service_uuid : str): @@ -232,10 +235,10 @@ class Slices(_Resource): for slice_ in json_requests: if context_uuid != slice_['slice_id']['context_id']['context_uuid']['uuid']: raise BadRequest('Mismatching context_uuid') - return [ - format_grpc_to_json(self.slice_client.CreateSlice(grpc_slice(**slice_))) + return jsonify([ + grpc_message_to_json(self.slice_client.CreateSlice(grpc_slice(slice_))) for slice_ in json_requests - ] + ]) class Slice(_Resource): def get(self, context_uuid : str, slice_uuid : str): @@ -264,10 +267,10 @@ class Devices(_Resource): json_requests = request.get_json() if 'devices' in json_requests: json_requests = json_requests['devices'] - return [ - format_grpc_to_json(self.device_client.AddDevice(grpc_device(device))) + return jsonify([ + grpc_message_to_json(self.device_client.AddDevice(grpc_device(device))) for device in json_requests - ] + ]) class Device(_Resource): def get(self, device_uuid : str): @@ -294,10 +297,15 @@ class Links(_Resource): json_requests = request.get_json() if 'links' in json_requests: json_requests = json_requests['links'] - return [ - format_grpc_to_json(self.context_client.SetLink(grpc_link(link))) - for link in json_requests - ] + json_link_ids : List[Dict] = list() + for link_json in json_requests: + link = grpc_link(link_json) + if link.link_type == LinkTypeEnum.LINKTYPE_VIRTUAL: + link_id = self.vntmanager_client.SetVirtualLink(link) + else: + link_id = self.context_client.SetLink(link) + json_link_ids.append(grpc_message_to_json(link_id)) + return jsonify(json_link_ids) class Link(_Resource): def get(self, link_uuid : str): diff --git a/src/nbi/service/topology_updates/Constants.py b/src/nbi/service/topology_updates/Constants.py new file mode 100644 index 0000000000000000000000000000000000000000..3e4dada3a736ca9922f86108b38fec53e6be1295 --- /dev/null +++ b/src/nbi/service/topology_updates/Constants.py @@ -0,0 +1,17 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
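+
+# NOTE (illustrative): clients that join this namespace receive a full
+# 'topology-snapshot' on connect and incremental 'topology-update' events
+# afterwards (see Namespaces.py and TopoUpdatesThread.py below). A minimal
+# client sketch, assuming python-socketio is installed and using an example
+# NBI URL:
+#
+#   import socketio
+#   sio = socketio.Client()
+#
+#   @sio.on('topology-update', namespace='/topo-updates')
+#   def on_topology_update(data):
+#       print('topology changed:', data)
+#
+#   sio.connect('http://nbiservice:8080', namespaces=['/topo-updates'])
+#   sio.wait()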
+ + +SIO_NAMESPACE = '/topo-updates' +SIO_ROOM = 'topo-updates' diff --git a/src/nbi/service/topology_updates/Namespaces.py b/src/nbi/service/topology_updates/Namespaces.py new file mode 100644 index 0000000000000000000000000000000000000000..963a1f26c4cf188debe654494c1de40e665d9ced --- /dev/null +++ b/src/nbi/service/topology_updates/Namespaces.py @@ -0,0 +1,63 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from flask import request +from flask_socketio import Namespace, join_room, leave_room +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.proto.context_pb2 import TopologyId +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from .Constants import SIO_NAMESPACE, SIO_ROOM +from .TopoUpdatesThread import TopoUpdatesThread + +LOGGER = logging.getLogger(__name__) + +class TopoUpdatesServerNamespace(Namespace): + def __init__(self): + super().__init__(namespace=SIO_NAMESPACE) + self._thread = TopoUpdatesThread(self) + self._thread.start() + + def stop_thread(self) -> None: + self._thread.stop() + + def on_connect(self, auth): + MSG = '[on_connect] Client connect: sid={:s}, auth={:s}' + LOGGER.info(MSG.format(str(request.sid), str(auth))) + join_room(SIO_ROOM, namespace=SIO_NAMESPACE) + + LOGGER.debug('[on_connect] emitting topology snapshot...') + + context_id = json_context_id(DEFAULT_CONTEXT_NAME) + topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id) + + try: + context_client = ContextClient() + topology_details = context_client.GetTopologyDetails( + TopologyId(**topology_id) + ) + except: # pylint: disable=bare-except + MSG = 'Unable to retrieve topology snapshot: {:s}' + LOGGER.exception(MSG.format(str(topology_id))) + else: + topology_snapshot = grpc_message_to_json_string(topology_details) + self.emit('topology-snapshot', topology_snapshot) + + def on_disconnect(self, reason): + MSG = '[on_disconnect] Client disconnect: sid={:s}, reason={:s}' + LOGGER.info(MSG.format(str(request.sid), str(reason))) + leave_room(SIO_ROOM, namespace=SIO_NAMESPACE) diff --git a/src/nbi/service/topology_updates/TopoUpdatesThread.py b/src/nbi/service/topology_updates/TopoUpdatesThread.py new file mode 100644 index 0000000000000000000000000000000000000000..dd9f96cd6ee2c3130b1c60f3b56df0583df0f6fd --- /dev/null +++ b/src/nbi/service/topology_updates/TopoUpdatesThread.py @@ -0,0 +1,92 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, socketio, threading +from common.proto.context_pb2 import TopologyId +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from .Constants import SIO_NAMESPACE, SIO_ROOM + +LOGGER = logging.getLogger(__name__) + +ADMIN_TOPOLOGY_ID = TopologyId( + **json_topology_id( + DEFAULT_TOPOLOGY_NAME, + context_id=json_context_id(DEFAULT_CONTEXT_NAME) + ) +) + +GET_EVENT_TIMEOUT = 1.0 + +class TopoUpdatesThread(threading.Thread): + def __init__(self, namespace : socketio.Namespace): + super().__init__(daemon=True) + self._terminate = threading.Event() + self._namespace = namespace + + def start(self): + self._terminate.clear() + return super().start() + + def stop(self) -> None: + self._terminate.set() + + def run(self): + LOGGER.info('[run] Starting...') + try: + context_client = ContextClient() + events_collector = EventsCollector( + context_client, + log_events_received = True, + activate_context_collector = True, + activate_topology_collector = True, + activate_device_collector = True, + activate_link_collector = True, + activate_service_collector = False, + activate_slice_collector = False, + activate_connection_collector = False, + ) + events_collector.start() + + LOGGER.info('[run] Running...') + while not self._terminate.is_set(): + event = events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT) + if event is None: continue + MSG = '[run] Event: {:s}' + LOGGER.debug(MSG.format(grpc_message_to_json_string(event))) + + # TODO: ideally, each event should trigger a notification containing + # the type of event and the relevant data for the event. To simplify, + # for now, the entire topology is sent. + + topology_details = context_client.GetTopologyDetails(ADMIN_TOPOLOGY_ID) + topology_update = grpc_message_to_json_string(topology_details) + + LOGGER.debug('[run] checking server namespace...') + server : socketio.Server = self._namespace.server + if server is None: continue + + LOGGER.debug('[run] emitting topology update...') + server.emit('topology-update', topology_update, namespace=SIO_NAMESPACE, to=SIO_ROOM) + LOGGER.debug('[run] emitted') + + LOGGER.info('[run] Exiting') + events_collector.stop() + except: # pylint: disable=bare-except + LOGGER.exception('[run] Unexpected Thread Exception') + LOGGER.info('[run] Terminated') diff --git a/src/nbi/service/topology_updates/__init__.py b/src/nbi/service/topology_updates/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a3d33228a8cab317b70c4d3185544f0d54e5d41 --- /dev/null +++ b/src/nbi/service/topology_updates/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from nbi.service.NbiApplication import NbiApplication +from .Namespaces import TopoUpdatesServerNamespace + +def register_topology_updates(nbi_app : NbiApplication): + nbi_app.add_websocket_namespace(TopoUpdatesServerNamespace()) diff --git a/src/nbi/service/vntm_recommend/Constants.py b/src/nbi/service/vntm_recommend/Constants.py new file mode 100644 index 0000000000000000000000000000000000000000..43e91fed5e36cfd2d6f69e1435103175a11ce743 --- /dev/null +++ b/src/nbi/service/vntm_recommend/Constants.py @@ -0,0 +1,17 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +SIO_NAMESPACE = '/recommendations' +SIO_ROOM = 'recommendations' diff --git a/src/nbi/service/vntm_recommend/Namespaces.py b/src/nbi/service/vntm_recommend/Namespaces.py new file mode 100644 index 0000000000000000000000000000000000000000..b19550fae4422d81230cd29143a6a3f3c333484e --- /dev/null +++ b/src/nbi/service/vntm_recommend/Namespaces.py @@ -0,0 +1,76 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
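+
+# This namespace bridges Socket.IO clients and Kafka: VntRecommThread relays
+# recommendation requests from the VNTMANAGER_REQUEST topic to the clients in
+# the '/recommendations' room, and the on_vlink_created/on_vlink_removed
+# handlers below publish the client replies on VNTMANAGER_RESPONSE, keyed by
+# the '_request_key' carried in the payload so the VNT manager can correlate
+# request and reply. Minimal client-side sketch (the inbound event name comes
+# from the Kafka message's 'event' field; 'vlink_create' is only a placeholder):
+#
+#   import json, socketio
+#   sio = socketio.Client()
+#
+#   @sio.on('vlink_create', namespace='/recommendations')
+#   def handle_vlink_create(data):
+#       reply = json.loads(data)  # must keep the '_request_key' field
+#       sio.emit('vlink_created', json.dumps(reply), namespace='/recommendations')
+#
+#   sio.connect('http://nbiservice:8080', namespaces=['/recommendations'])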
+ +import json, logging +from flask import request +from flask_socketio import Namespace, join_room, leave_room +from kafka import KafkaProducer +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +from .Constants import SIO_NAMESPACE, SIO_ROOM +from .VntRecommThread import VntRecommThread + +LOGGER = logging.getLogger(__name__) + +class VntRecommServerNamespace(Namespace): + def __init__(self): + super().__init__(namespace=SIO_NAMESPACE) + self._thread = VntRecommThread(self) + self._thread.start() + + self.kafka_producer = KafkaProducer( + bootstrap_servers = KafkaConfig.get_kafka_address(), + ) + + def stop_thread(self) -> None: + self._thread.stop() + + def on_connect(self, auth): + MSG = '[on_connect] Client connect: sid={:s}, auth={:s}' + LOGGER.debug(MSG.format(str(request.sid), str(auth))) + join_room(SIO_ROOM, namespace=SIO_NAMESPACE) + + def on_disconnect(self, reason): + MSG = '[on_disconnect] Client disconnect: sid={:s}, reason={:s}' + LOGGER.debug(MSG.format(str(request.sid), str(reason))) + leave_room(SIO_ROOM, namespace=SIO_NAMESPACE) + + def on_vlink_created(self, data): + MSG = '[on_vlink_created] begin: sid={:s}, data={:s}' + LOGGER.debug(MSG.format(str(request.sid), str(data))) + + data = json.loads(data) + request_key = str(data.pop('_request_key')).encode('utf-8') + vntm_reply = json.dumps({'event': 'vlink_created', 'data': data}).encode('utf-8') + LOGGER.debug('[on_vlink_created] request_key={:s}/{:s}'.format(str(type(request_key)), str(request_key))) + LOGGER.debug('[on_vlink_created] vntm_reply={:s}/{:s}'.format(str(type(vntm_reply)), str(vntm_reply))) + + self.kafka_producer.send( + KafkaTopic.VNTMANAGER_RESPONSE.value, key=request_key, value=vntm_reply + ) + self.kafka_producer.flush() + + def on_vlink_removed(self, data): + MSG = '[on_vlink_removed] begin: sid={:s}, data={:s}' + LOGGER.debug(MSG.format(str(request.sid), str(data))) + + data = json.loads(data) + request_key = str(data.pop('_request_key')).encode('utf-8') + vntm_reply = json.dumps({'event': 'vlink_removed', 'data': data}).encode('utf-8') + LOGGER.debug('[on_vlink_removed] request_key={:s}/{:s}'.format(str(type(request_key)), str(request_key))) + LOGGER.debug('[on_vlink_removed] vntm_reply={:s}/{:s}'.format(str(type(vntm_reply)), str(vntm_reply))) + + self.kafka_producer.send( + KafkaTopic.VNTMANAGER_RESPONSE.value, key=request_key, value=vntm_reply + ) + self.kafka_producer.flush() diff --git a/src/nbi/service/vntm_recommend/VntRecommThread.py b/src/nbi/service/vntm_recommend/VntRecommThread.py new file mode 100644 index 0000000000000000000000000000000000000000..2b745c16de984ee9fa15e439eb67a3b81929c914 --- /dev/null +++ b/src/nbi/service/vntm_recommend/VntRecommThread.py @@ -0,0 +1,88 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
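+
+# Consumes VNT recommendation requests from Kafka and forwards them to the
+# websocket clients of VntRecommServerNamespace. Note the consumer settings
+# below: group_id=None disables consumer-group coordination, so every NBI
+# replica receives its own copy of each message, and auto_offset_reset='latest'
+# means only messages produced after this thread subscribes are delivered.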
+
+import json, logging, socketio, threading
+from typing import Dict, List
+from common.tools.kafka.Variables import KafkaConfig, KafkaTopic
+from kafka import KafkaConsumer, TopicPartition
+from kafka.consumer.fetcher import ConsumerRecord
+from .Constants import SIO_NAMESPACE, SIO_ROOM
+
+LOGGER = logging.getLogger(__name__)
+
+class VntRecommThread(threading.Thread):
+    def __init__(self, namespace : socketio.Namespace):
+        super().__init__(daemon=True)
+        self._terminate = threading.Event()
+        self._namespace = namespace
+
+    def start(self):
+        self._terminate.clear()
+        return super().start()
+
+    def stop(self) -> None:
+        self._terminate.set()
+
+    def run(self):
+        LOGGER.info('[run] Starting...')
+        try:
+            kafka_consumer = KafkaConsumer(
+                bootstrap_servers = KafkaConfig.get_kafka_address(),
+                group_id = None, # no consumer group: this consumer receives all messages sent to the subscribed topics
+                auto_offset_reset = 'latest',
+            )
+
+            LOGGER.info('[run] Subscribing...')
+            kafka_consumer.subscribe(topics=[KafkaTopic.VNTMANAGER_REQUEST.value])
+            LOGGER.info('[run] Subscribed')
+
+            while not self._terminate.is_set():
+                topic_records : Dict[TopicPartition, List[ConsumerRecord]] = \
+                    kafka_consumer.poll(timeout_ms=1000, max_records=1)
+                if len(topic_records) == 0: continue # no pending records
+                self.process_topic_records(topic_records)
+
+            LOGGER.info('[run] Closing...')
+            kafka_consumer.close()
+        except: # pylint: disable=bare-except
+            LOGGER.exception('[run] Unexpected Thread Exception')
+        LOGGER.info('[run] Terminated')
+
+    def process_topic_records(
+        self, topic_records : Dict[TopicPartition, List[ConsumerRecord]]
+    ) -> None:
+        MSG = '[process_topic_records] topic_records={:s}'
+        LOGGER.debug(MSG.format(str(topic_records)))
+        for topic, records in topic_records.items():
+            if topic.topic == KafkaTopic.VNTMANAGER_REQUEST.value:
+                for record in records: self.emit_recommendation(record)
+
+    def emit_recommendation(self, record : ConsumerRecord) -> None:
+        message_key = record.key.decode('utf-8')
+        message_value = record.value.decode('utf-8')
+        message_value = json.loads(message_value)
+        message_event = message_value.pop('event')
+        message_data = json.loads(message_value['data'])
+        message_data['_request_key'] = message_key
+        message_data = json.dumps(message_data)
+
+        MSG = '[emit_recommendation] Recommendation: event={:s} data={:s}'
+        LOGGER.debug(MSG.format(str(message_event), str(message_data)))
+
+        LOGGER.debug('[emit_recommendation] checking server namespace...')
+        server : socketio.Server = self._namespace.server
+        if server is None: return
+        LOGGER.debug('[emit_recommendation] emitting recommendation...')
+        server.emit(message_event, message_data, namespace=SIO_NAMESPACE, to=SIO_ROOM)
+        LOGGER.debug('[emit_recommendation] emitted')
diff --git a/src/nbi/service/vntm_recommend/__init__.py b/src/nbi/service/vntm_recommend/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..21deb31f0f2571ab3d6f8f0c563bf70b693e7a79
--- /dev/null
+++ b/src/nbi/service/vntm_recommend/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from nbi.service.NbiApplication import NbiApplication +from .Namespaces import VntRecommServerNamespace + +def register_vntm_recommend(nbi_app : NbiApplication): + nbi_app.add_websocket_namespace(VntRecommServerNamespace()) diff --git a/src/opticalcontroller/OpticalController.py b/src/opticalcontroller/OpticalController.py index 74fd7882689d92a654a749ea5be0c2583841a31b..9cf0f67334037e90b1d58fffb0e8a8e9ba6259db 100644 --- a/src/opticalcontroller/OpticalController.py +++ b/src/opticalcontroller/OpticalController.py @@ -15,6 +15,7 @@ import logging, time from flask import Flask from flask import render_template +from common.DeviceTypes import DeviceTypeEnum from flask_restplus import Resource, Api from google.protobuf.json_format import MessageToDict from common.proto.context_pb2 import TopologyId @@ -317,7 +318,44 @@ class GetTopology(Resource): node_dict = {} topo, nodes = readTopologyDataFromContext(topog_id) - for link in topo: + OPTICAL_ROADM_TYPES = { + DeviceTypeEnum.OPTICAL_ROADM.value, DeviceTypeEnum.EMULATED_OPTICAL_ROADM.value + } + OPTICAL_TRANSPONDER_TYPES = { + DeviceTypeEnum.OPTICAL_TRANSPONDER.value, DeviceTypeEnum.EMULATED_OPTICAL_TRANSPONDER.value + } + added_device_uuids = set() + for device in nodes: + if device.device_type in OPTICAL_ROADM_TYPES: + dev_type = "OC-ROADM" + elif device.device_type in OPTICAL_TRANSPONDER_TYPES: + dev_type = "OC-TP" + else: + continue + + dev_dic = { + "id":device.device_id.device_uuid.uuid, + #"ip":f"10.30.2.{207+i}", + #"port":"50001", + "type": dev_type, + "driver": "OpticalOC" + } + node_dict[device.name] = dev_dic + added_device_uuids.add(device.device_id.device_uuid.uuid) + #i+=1 + #print(f"refresh_optical controller optical_links_dict= {links_dict}") + #print(f"refresh_optical controller node_dict {node_dict}") + + for link in topo: + endpoint_id_a = link.link_endpoint_ids[ 0] + endpoint_id_z = link.link_endpoint_ids[-1] + + device_uuid_a = endpoint_id_a.device_id.device_uuid.uuid + if device_uuid_a not in added_device_uuids: continue + + device_uuid_z = endpoint_id_z.device_id.device_uuid.uuid + if device_uuid_z not in added_device_uuids: continue + link_dict_type = MessageToDict(link, preserving_proto_field_name=True) if "c_slots" in link_dict_type["optical_details"]: @@ -331,19 +369,6 @@ class GetTopology(Resource): links_dict["optical_links"].append(link_dict_type) - for device in nodes : - dev_dic = { - "id":device.device_id.device_uuid.uuid, - #"ip":f"10.30.2.{207+i}", - #"port":"50001", - "type":"OC-ROADM" if device.device_type =="optical-roadm" else "OC-TP", - "driver": "OpticalOC" - } - node_dict[device.name] = dev_dic - #i+=1 - #print(f"refresh_optical controller optical_links_dict= {links_dict}") - #print(f"refresh_optical controller node_dict {node_dict}") - rsa = RSA(node_dict, links_dict) if debug: print(rsa.init_link_slots2()) @@ -354,4 +379,4 @@ class GetTopology(Resource): return "Error", 400 if __name__ == '__main__': - app.run(host='0.0.0.0', port=10060) + app.run(host='0.0.0.0', port=10060, debug=True) diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 
da31187eb1d5c3f6f51cbbdc1c9641653b683e4b..e5cf6e4065f0daf116fa623b93f0dea36215b457 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -12,10 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import logging from opticalcontroller.dijkstra import Graph, shortest_path from opticalcontroller.tools import * from opticalcontroller.variables import * +LOGGER = logging.getLogger(__name__) + +def print(*args) -> None: + LOGGER.info(' '.join([str(a) for a in args])) class RSA(): def __init__(self, nodes, links): diff --git a/src/pathcomp/frontend/Dockerfile b/src/pathcomp/frontend/Dockerfile index c96db7293cb16cbe18e8ae2b8894590d16fb492c..baecf8f6927307fa1c318a92df237aec64b1eeca 100644 --- a/src/pathcomp/frontend/Dockerfile +++ b/src/pathcomp/frontend/Dockerfile @@ -77,6 +77,8 @@ COPY src/service/__init__.py service/__init__.py COPY src/service/client/. service/client/ COPY src/slice/__init__.py slice/__init__.py COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ # Start the service ENTRYPOINT ["python", "-m", "pathcomp.frontend.service"] diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py index 0554084f8e3c83504e3c225ecc5a3e7503ed9c53..f9672d070f4fdeecf12f0447090b968380494769 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComposeRequest.py @@ -101,8 +101,8 @@ def compose_device(grpc_device : Device) -> Dict: link_port_direction = LinkPortDirection.BIDIRECTIONAL.value termination_direction = TerminationDirection.BIDIRECTIONAL.value termination_state = TerminationState.TERMINATED_BIDIRECTIONAL.value - total_potential_capacity = compose_capacity(200, CapacityUnit.MBPS.value) - available_capacity = compose_capacity(200, CapacityUnit.MBPS.value) + total_potential_capacity = compose_capacity(100000, CapacityUnit.GBPS.value) + available_capacity = compose_capacity(100000, CapacityUnit.GBPS.value) endpoint = compose_endpoint( endpoint_id, endpoint_type, link_port_direction, termination_direction, termination_state, total_potential_capacity, available_capacity) @@ -130,7 +130,7 @@ def compose_link(grpc_link : Link) -> Dict: elif total_capacity_gbps is not None: used_capacity_gbps = total_capacity_gbps - if total_capacity_gbps is None: total_capacity_gbps = 100 + if total_capacity_gbps is None: total_capacity_gbps = 100000 if used_capacity_gbps is None: used_capacity_gbps = 0 available_capacity_gbps = total_capacity_gbps - used_capacity_gbps diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index b6f8a7faf89acf25c84326132ff5e965aade61b2..3a376c0147ad61ca1db1041975f4d6afdd79460e 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -13,7 +13,7 @@ # limitations under the License. 
import grpc, json, logging, random, uuid -from typing import Optional +from typing import Dict, Optional from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from common.method_wrappers.ServiceExceptions import ( AlreadyExistsException, InvalidArgumentException, NotFoundException, NotImplementedException, @@ -265,10 +265,18 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): devices = topology_details.devices context_uuid_x = topology_details.topology_id.context_id.context_uuid.uuid topology_uuid_x = topology_details.topology_id.topology_uuid.uuid + + device_names : Dict[str, str] = dict() + for device in devices: + device_uuid = device.device_id.device_uuid.uuid + device_names[device_uuid] = device.name + devs = [] ports = [] for endpoint_id in service.service_endpoint_ids: - devs.append(endpoint_id.device_id.device_uuid.uuid) + endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_device_name = device_names[endpoint_device_uuid] + devs.append(endpoint_device_name) ports.append(endpoint_id.endpoint_uuid.uuid) src = devs[0] dst = devs[1] @@ -359,7 +367,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): context_client.RemoveService(request) return Empty() - if service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + if is_deployed_optical() and service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: params = { "src" : None, "dst" : None, diff --git a/src/service/service/service_handlers/__init__.py b/src/service/service/service_handlers/__init__.py index 85545d238f2b93bd77b1beb1fce2d46b01b06800..74ad3620a29a95ff3ac5e9599216879648691e56 100644 --- a/src/service/service/service_handlers/__init__.py +++ b/src/service/service/service_handlers/__init__.py @@ -28,7 +28,7 @@ from .microwave.MicrowaveServiceHandler import MicrowaveServiceHandler from .p4.p4_service_handler import P4ServiceHandler from .tapi_tapi.TapiServiceHandler import TapiServiceHandler from .tapi_xr.TapiXrServiceHandler import TapiXrServiceHandler -from .e2e_orch.E2EOrchestratorServiceHandler import E2EOrchestratorServiceHandler +from .optical_tfs.OpticalTfsServiceHandler import OpticalTfsServiceHandler from .oc.OCServiceHandler import OCServiceHandler from .qkd.qkd_service_handler import QKDServiceHandler @@ -117,9 +117,9 @@ SERVICE_HANDLERS = [ FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_IETF_L2VPN], } ]), - (E2EOrchestratorServiceHandler, [ + (OpticalTfsServiceHandler, [ { - FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_E2E, + FilterFieldEnum.SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, FilterFieldEnum.DEVICE_DRIVER : [DeviceDriverEnum.DEVICEDRIVER_OPTICAL_TFS], } ]), diff --git a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py index 37f8256b1b294724ee017e925c8879777c56008d..99d4bc8e53f73037ebcb66868ef7e96edbb99dcf 100644 --- a/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_openconfig/ConfigRules.py @@ -12,52 +12,74 @@ # See the License for the specific language governing permissions and # limitations under the License. 
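+# Settings lookup refactor: endpoint settings take precedence over device
+# settings, which take precedence over service settings. get_value() walks the
+# containers in that order and returns the first match; when a field is absent
+# from all containers and no 'default' kwarg is given, it now raises instead of
+# silently returning None (the previous behavior).
+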
+import functools
 from typing import Any, Dict, List, Optional, Tuple
 from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set
 from service.service.service_handler_api.AnyTreeTools import TreeNode
 
-def get_value(field_name : str, *containers, default=None) -> Optional[Any]:
+def get_settings_containers(
+    service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode
+) -> List[Dict]:
+    settings_containers : List[Dict] = list()
+
+    # highest priority settings container
+    if endpoint_settings is not None:
+        json_endpoint_settings : Dict = endpoint_settings.value
+        settings_containers.append(json_endpoint_settings)
+
+    if device_settings is not None:
+        json_device_settings : Dict = device_settings.value
+        settings_containers.append(json_device_settings)
+
+    # lowest priority settings container
+    if service_settings is not None:
+        json_service_settings : Dict = service_settings.value
+        settings_containers.append(json_service_settings)
+
+    return settings_containers
+
+def get_value(containers : List[Dict], field_name : str, **kwargs) -> Optional[Any]:
     if len(containers) == 0: raise Exception('No containers specified')
+
     for container in containers:
         if field_name not in container: continue
         return container[field_name]
-    return default
+
+    if 'default' in kwargs:
+        return kwargs['default']
+
+    MSG = 'Field({:s}) not found in containers specified({:s}) and no default value specified'
+    # pylint: disable=broad-exception-raised
+    raise Exception(MSG.format(str(field_name), str(containers)))
 
 def setup_config_rules(
     service_uuid : str, connection_uuid : str, device_uuid : str, endpoint_uuid : str, endpoint_name : str,
     service_settings : TreeNode, device_settings : TreeNode, endpoint_settings : TreeNode, endpoint_acls : List [Tuple]
 ) -> List[Dict]:
-    if service_settings is None: return []
-    if device_settings is None: return []
-    if endpoint_settings is None: return []
-
-    json_settings : Dict = service_settings.value
-    json_device_settings : Dict = device_settings.value
-    json_endpoint_settings : Dict = endpoint_settings.value
-
-    settings = (json_settings, json_endpoint_settings, json_device_settings)
-
-    mtu = get_value('mtu', *settings, default=1450) # 1512
-    #address_families = json_settings.get('address_families', [] ) # ['IPV4']
-    bgp_as = get_value('bgp_as', *settings, default=65000) # 65000
-
-    router_id = json_endpoint_settings.get('router_id', '0.0.0.0') # '10.95.0.10'
-    route_distinguisher = json_settings.get('route_distinguisher', '65000:101' ) # '60001:801'
-    sub_interface_index = json_endpoint_settings.get('sub_interface_index', 0 ) # 1
-    vlan_id = json_endpoint_settings.get('vlan_id', 1 ) # 400
-    address_ip = json_endpoint_settings.get('address_ip', '0.0.0.0') # '2.2.2.1'
-    address_prefix = json_endpoint_settings.get('address_prefix', 24 ) # 30
-
-    policy_import = json_endpoint_settings.get('policy_AZ', '2' ) # 2
-    policy_export = json_endpoint_settings.get('policy_ZA', '7' ) # 30
+    settings_containers : List[Dict] = get_settings_containers(
+        service_settings, device_settings, endpoint_settings
+    )
+    get_settings_value = functools.partial(get_value, settings_containers)
+
+    mtu                 = get_settings_value('mtu',                 default=1450     ) # 1512
+    #address_families   = get_settings_value('address_families',    default=[]       ) # ['IPV4']
+    bgp_as              = get_settings_value('bgp_as',              default=65000    ) # 65000
+    router_id           = get_settings_value('router_id',           default='0.0.0.0') # '10.95.0.10'
+    route_distinguisher = get_settings_value('route_distinguisher',
default='65000:101') # '60001:801' + sub_interface_index = get_settings_value('sub_interface_index', default=0 ) # 1 + vlan_id = get_settings_value('vlan_id', default=1 ) # 400 + address_ip = get_settings_value('address_ip', default='0.0.0.0' ) # '2.2.2.1' + address_prefix = get_settings_value('address_prefix', default=24 ) # 30 + policy_import = get_settings_value('policy_AZ', default='2' ) # 2 + policy_export = get_settings_value('policy_ZA', default='7' ) # 30 #network_interface_desc = '{:s}-NetIf'.format(service_uuid) - network_interface_desc = json_endpoint_settings.get('ni_description','') + network_interface_desc = get_settings_value('ni_description', default='' ) #network_subinterface_desc = '{:s}-NetSubIf'.format(service_uuid) - network_subinterface_desc = json_endpoint_settings.get('subif_description','') - #service_short_uuid = service_uuid.split('-')[-1] + network_subinterface_desc = get_settings_value('subif_description', default='' ) + service_short_uuid = service_uuid.split('-')[-1] #network_instance_name = '{:s}-NetInst'.format(service_short_uuid) - network_instance_name = json_endpoint_settings.get('ni_name', service_uuid.split('-')[-1]) #ELAN-AC:1 + network_instance_name = get_settings_value('ni_name', default=service_short_uuid) #ELAN-AC:1 if_subif_name = '{:s}.{:d}'.format(endpoint_name, vlan_id) diff --git a/src/service/service/service_handlers/e2e_orch/E2EOrchestratorServiceHandler.py b/src/service/service/service_handlers/optical_tfs/OpticalTfsServiceHandler.py similarity index 79% rename from src/service/service/service_handlers/e2e_orch/E2EOrchestratorServiceHandler.py rename to src/service/service/service_handlers/optical_tfs/OpticalTfsServiceHandler.py index 9512f242c90db54dbf1952a4294bcf6a74d1d121..4ac85f3ac02bccfa06b29a50aceefee9c03a25eb 100644 --- a/src/service/service/service_handlers/e2e_orch/E2EOrchestratorServiceHandler.py +++ b/src/service/service/service_handlers/optical_tfs/OpticalTfsServiceHandler.py @@ -16,7 +16,7 @@ import json, logging from typing import Any, Dict, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method from common.proto.context_pb2 import ConfigRule, DeviceId, Service -from common.tools.object_factory.ConfigRule import json_config_rule_set +from common.tools.object_factory.ConfigRule import json_config_rule_delete, json_config_rule_set from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type from service.service.service_handler_api.Tools import get_device_endpoint_uuids @@ -26,9 +26,9 @@ from service.service.task_scheduler.TaskExecutor import TaskExecutor LOGGER = logging.getLogger(__name__) -METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'e2e_orch'}) +METRICS_POOL = MetricsPool('Service', 'Handler', labels={'handler': 'optical_tfs'}) -class E2EOrchestratorServiceHandler(_ServiceHandler): +class OpticalTfsServiceHandler(_ServiceHandler): def __init__( # pylint: disable=super-init-not-called self, service : Service, task_executor : TaskExecutor, **settings ) -> None: @@ -36,6 +36,20 @@ class E2EOrchestratorServiceHandler(_ServiceHandler): self.__task_executor = task_executor self.__settings_handler = SettingsHandler(service.service_config, **settings) + def _get_constraints(self) -> Tuple[Optional[int], Optional[int], Optional[int]]: + bitrate = None + bidir = None + ob_width = None + for constraint in self.__service.service_constraints: + if constraint.WhichOneof('constraint') != 'custom': continue + 
if constraint.custom.constraint_type == 'bandwidth[gbps]': + bitrate = int(float(constraint.custom.constraint_value)) + elif constraint.custom.constraint_type == 'bidirectionality': + bidir = int(constraint.custom.constraint_value) == 1 + elif constraint.custom.constraint_type == 'optical-band-width[GHz]': + ob_width = int(constraint.custom.constraint_value) + return bitrate, bidir, ob_width + @metered_subclass_method(METRICS_POOL) def SetEndpoint( self, endpoints : List[Tuple[str, str, Optional[str]]], connection_uuid : Optional[str] = None @@ -45,9 +59,8 @@ class E2EOrchestratorServiceHandler(_ServiceHandler): if len(endpoints) < 2: return [] service_uuid = self.__service.service_id.service_uuid.uuid - settings = self.__settings_handler.get('/settings') - json_settings : Dict = {} if settings is None else settings.value - bitrate = json_settings.get('bitrate', 1000) + service_name = self.__service.name + bitrate, bidir, ob_width = self._get_constraints() results = [] try: @@ -64,10 +77,15 @@ class E2EOrchestratorServiceHandler(_ServiceHandler): controller = src_controller json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), { - 'uuid' : service_uuid, - 'src_node' : src_endpoint_uuid, - 'dst_node' : dst_endpoint_uuid, - 'bitrate' : bitrate + 'service_uuid' : service_uuid, + 'service_name' : service_name, + 'src_device_uuid' : src_device_uuid, + 'src_endpoint_uuid': src_endpoint_uuid, + 'dst_device_uuid' : dst_device_uuid, + 'dst_endpoint_uuid': dst_endpoint_uuid, + 'bitrate' : bitrate, + 'bidir' : bidir, + 'ob_width' : ob_width, }) del controller.device_config.config_rules[:] controller.device_config.config_rules.append(ConfigRule(**json_config_rule)) @@ -88,31 +106,25 @@ class E2EOrchestratorServiceHandler(_ServiceHandler): if len(endpoints) < 2: return [] service_uuid = self.__service.service_id.service_uuid.uuid - settings = self.__settings_handler.get('/settings') - json_settings : Dict = {} if settings is None else settings.value - flow_id = json_settings.get('flow_id', 100) - bitrate = json_settings.get('bitrate', 1000) + service_name = self.__service.name results = [] try: - src_device_uuid, src_endpoint_uuid = get_device_endpoint_uuids(endpoints[0]) + src_device_uuid, _ = get_device_endpoint_uuids(endpoints[0]) src_device = self.__task_executor.get_device(DeviceId(**json_device_id(src_device_uuid))) src_controller = self.__task_executor.get_device_controller(src_device) if src_controller is None: src_controller = src_device - dst_device_uuid, dst_endpoint_uuid = get_device_endpoint_uuids(endpoints[1]) + dst_device_uuid, _ = get_device_endpoint_uuids(endpoints[-1]) dst_device = self.__task_executor.get_device(DeviceId(**json_device_id(dst_device_uuid))) dst_controller = self.__task_executor.get_device_controller(dst_device) if dst_controller is None: dst_controller = dst_device controller = src_controller - json_config_rule = json_config_rule_set('/services/service[{:s}]'.format(service_uuid), { - 'uuid' : service_uuid, - 'flow_id' : flow_id, - 'src_node' : src_endpoint_uuid, - 'dst_node' : dst_endpoint_uuid, - 'bitrate' : bitrate + json_config_rule = json_config_rule_delete('/services/service[{:s}]'.format(service_uuid), { + 'service_uuid': service_uuid, + 'service_name': service_name, }) del controller.device_config.config_rules[:] diff --git a/src/service/service/service_handlers/e2e_orch/__init__.py b/src/service/service/service_handlers/optical_tfs/__init__.py similarity index 100% rename from 
src/service/service/service_handlers/e2e_orch/__init__.py rename to src/service/service/service_handlers/optical_tfs/__init__.py diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py index fac991432028e2c042bfce5dc02dd22d44cac5d0..d98daf280903be397c4315bdb4f87d0086af95ad 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionConfigure.py @@ -56,12 +56,15 @@ class Task_ConnectionConfigure(_Task): endpointids_to_set = endpointids_to_raw(connection.path_hops_endpoint_ids) errors = list() - for _, (service_handler, connection_devices) in service_handlers.items(): - _endpointids_to_set = [ - (device_uuid, endpoint_uuid, topology_uuid) - for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_set - if device_uuid in connection_devices - ] + for device_type, (service_handler, connection_devices) in service_handlers.items(): + if device_type == DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: + _endpointids_to_set = endpointids_to_set + else: + _endpointids_to_set = [ + (device_uuid, endpoint_uuid, topology_uuid) + for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_set + if device_uuid in connection_devices + ] results_setendpoint = service_handler.SetEndpoint( _endpointids_to_set, connection_uuid=connection_uuid ) diff --git a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py index cb1e91cc2172162858f3cf0246fa4ce46497b82e..8528f8365c1e3a461c288fdf6efe14475921051c 100644 --- a/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py +++ b/src/service/service/task_scheduler/tasks/Task_ConnectionDeconfigure.py @@ -56,12 +56,15 @@ class Task_ConnectionDeconfigure(_Task): endpointids_to_delete = endpointids_to_raw(connection.path_hops_endpoint_ids) errors = list() - for _, (service_handler, connection_devices) in service_handlers.items(): - _endpointids_to_delete = [ - (device_uuid, endpoint_uuid, topology_uuid) - for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_delete - if device_uuid in connection_devices - ] + for device_type, (service_handler, connection_devices) in service_handlers.items(): + if device_type == DeviceTypeEnum.TERAFLOWSDN_CONTROLLER: + _endpointids_to_delete = endpointids_to_delete + else: + _endpointids_to_delete = [ + (device_uuid, endpoint_uuid, topology_uuid) + for device_uuid, endpoint_uuid, topology_uuid in endpointids_to_delete + if device_uuid in connection_devices + ] results_deleteendpoint = service_handler.DeleteEndpoint( _endpointids_to_delete, connection_uuid=connection_uuid ) diff --git a/src/tests/.gitlab-ci.yml b/src/tests/.gitlab-ci.yml index 06578bea80812f0feb8a46698cdddbfa5a7a6188..787b25ee1c8155b4c06be68c57d0314bf1f02c1d 100644 --- a/src/tests/.gitlab-ci.yml +++ b/src/tests/.gitlab-ci.yml @@ -23,6 +23,6 @@ include: - local: '/src/tests/eucnc24/.gitlab-ci.yml' - local: '/src/tests/ofc25-camara-agg-net-controller/.gitlab-ci.yml' - local: '/src/tests/ofc25-camara-e2e-controller/.gitlab-ci.yml' - #- local: '/src/tests/ecoc24/.gitlab-ci.yml' + #- local: '/src/tests/ofc25/.gitlab-ci.yml' - local: '/src/tests/tools/mock_tfs_nbi_dependencies/.gitlab-ci.yml' diff --git a/src/tests/ecoc22/Dockerfile b/src/tests/ecoc22/Dockerfile index b0489e023d96f58d07e7be241f2ed5de92154264..996d515f3ef6c2097bb811d14d9d04732f17c640 100644 --- a/src/tests/ecoc22/Dockerfile 
+++ b/src/tests/ecoc22/Dockerfile @@ -78,6 +78,8 @@ COPY src/service/__init__.py service/__init__.py COPY src/service/client/. service/client/ COPY src/slice/__init__.py slice/__init__.py COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ COPY src/tests/*.py ./tests/ COPY src/tests/ecoc22/__init__.py ./tests/ecoc22/__init__.py COPY src/tests/ecoc22/descriptors_emulated.json ./tests/ecoc22/descriptors_emulated.json diff --git a/src/tests/ecoc24/descriptors/descriptor_e2e.json b/src/tests/ecoc24/descriptors/descriptor_e2e.json deleted file mode 100644 index 41649d92ebf6a4cdb854a8f523b9b0497d04fd59..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/descriptors/descriptor_e2e.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "contexts":[ - { - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - } - } - ], - "topologies":[ - { - "topology_id":{ - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - }, - "topology_uuid":{ - "uuid":"admin" - } - } - } - ], - "links":[ - { - "link_id":{ - "link_uuid":{ - "uuid":"CSGW1_CTP1-OTP1" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"CSGW1" - } - }, - "endpoint_uuid":{ - "uuid":"PORT-xe5" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.1" - } - }, - "endpoint_uuid":{ - "uuid":"P2" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"CSGW2_CTP1-OTP1" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"CSGW2" - } - }, - "endpoint_uuid":{ - "uuid":"PORT-xe5" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.1" - } - }, - "endpoint_uuid":{ - "uuid":"P2" - } - } - ] - } - ] - } - \ No newline at end of file diff --git a/src/tests/ecoc24/descriptors/descriptor_ip.json b/src/tests/ecoc24/descriptors/descriptor_ip.json deleted file mode 100644 index 11ad2d5c121925706f59486753504cf660cf27b1..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/descriptors/descriptor_ip.json +++ /dev/null @@ -1,213 +0,0 @@ -{ - "contexts":[ - { - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - } - } - ], - "topologies":[ - { - "topology_id":{ - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - }, - "topology_uuid":{ - "uuid":"admin" - } - } - } - ], - "devices":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"CSGW1" - } - }, - "device_type":"emu-packet-router", - "device_drivers":[ - 0 - ], - - "device_operational_status":2, - - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":0 - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "name":"PORT-xe5", - "type":"copper", - "uuid":"PORT-xe5" - } - ] - } - } - }, - { - "action":1, - "custom":{ - "resource_key":"/endpoints/endpoint[PORT-xe5]", - "resource_value":{ - "name":"PORT-xe5", - "type":"copper", - "uuid":"PORT-xe5" - } - } - } - ] - }, - "device_endpoints":[ - { - "endpoint_id":{ - "device_id":{ - "device_uuid":{ - "uuid":"CSGW1" - } - }, - "endpoint_uuid":{ - "uuid":"PORT-xe5" - }, - "topology_id":{ - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - }, - "topology_uuid":{ - "uuid":"admin" - } - } - }, - "endpoint_location":{ - - }, - "endpoint_type":"optical", - "kpi_sample_types":[ - - ], - "name":"PORT-xe5" - } - ], - - "name":"CSGW1" - }, - { - 
"device_id":{ - "device_uuid":{ - "uuid":"CSGW2" - } - }, - "device_type":"emu-packet-router", - "device_drivers":[ - 0 - ], - - "device_operational_status":2, - - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":0 - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "name":"PORT-xe5", - "type":"copper", - "uuid":"PORT-xe5" - } - ] - } - } - }, - { - "action":1, - "custom":{ - "resource_key":"/endpoints/endpoint[PORT-xe5]", - "resource_value":{ - "name":"PORT-xe5", - "type":"copper", - "uuid":"PORT-xe5" - } - } - } - ] - }, - "device_endpoints":[ - { - "endpoint_id":{ - "device_id":{ - "device_uuid":{ - "uuid":"CSGW2" - } - }, - "endpoint_uuid":{ - "uuid":"PORT-xe5" - }, - "topology_id":{ - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - }, - "topology_uuid":{ - "uuid":"admin" - } - } - }, - "endpoint_location":{ - - }, - "endpoint_type":"optical", - "kpi_sample_types":[ - - ], - "name":"PORT-xe5" - } - ], - - "name":"CSGW2" - } - ], - "dummy_mode":true - } - \ No newline at end of file diff --git a/src/tests/ecoc24/descriptors/descriptor_opt.json b/src/tests/ecoc24/descriptors/descriptor_opt.json deleted file mode 100644 index cfb86b9660ac3559d10a9358e0484f27f4ff70c7..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/descriptors/descriptor_opt.json +++ /dev/null @@ -1,803 +0,0 @@ -{ - "contexts":[ - { - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - } - } - ], - "topologies":[ - { - "topology_id":{ - "context_id":{ - "context_uuid":{ - "uuid":"admin" - } - }, - "topology_uuid":{ - "uuid":"admin" - } - } - } - ], - "devices":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.1" - } - }, - "device_type":"emu-optical-transponder", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.2" - } - }, - "device_type":"emu-optical-transponder", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.3" - } - }, - "device_type":"emu-optical-transponder", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - 
"action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON1" - } - }, - "device_type":"emu-optical-roadm", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"OTP1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"OTP2" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"OTP3" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON2" - } - }, - "device_type":"emu-optical-roadm", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"3" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON3" - } - }, - "device_type":"emu-optical-roadm", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"OTP1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"OTP2" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"OTP3" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.1" - } - }, - "device_type":"emu-optical-transponder", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - 
"sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.2" - } - }, - "device_type":"emu-optical-transponder", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P2" - } - ] - } - } - } - ] - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.3" - } - }, - "device_type":"emu-optical-transponder", - "device_drivers":[ - 0 - ], - "device_endpoints":[ - - ], - "device_operational_status":0, - "device_config":{ - "config_rules":[ - { - "action":1, - "custom":{ - "resource_key":"_connect/address", - "resource_value":"127.0.0.1" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/port", - "resource_value":"0" - } - }, - { - "action":1, - "custom":{ - "resource_key":"_connect/settings", - "resource_value":{ - "endpoints":[ - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P1" - }, - { - "sample_types":[ - - ], - "type":"copper/internal", - "uuid":"P2" - } - ] - } - } - } - ] - } - } - ], - "links":[ - { - "link_id":{ - "link_uuid":{ - "uuid":"MG-ON1->MG-ON2" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON1" - } - }, - "endpoint_uuid":{ - "uuid":"2" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON2" - } - }, - "endpoint_uuid":{ - "uuid":"1" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"MG-ON2->MG-ON3" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON2" - } - }, - "endpoint_uuid":{ - "uuid":"3" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON3" - } - }, - "endpoint_uuid":{ - "uuid":"2" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"OTP1.1->MG-ON1" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.1" - } - }, - "endpoint_uuid":{ - "uuid":"P1" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON1" - } - }, - "endpoint_uuid":{ - "uuid":"OTP1" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"OTP1.2->MG-ON1" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.2" - } - }, - "endpoint_uuid":{ - "uuid":"P1" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON1" - } - }, - "endpoint_uuid":{ - "uuid":"OTP2" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"OTP1.3->MG-ON1" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP1.3" - } - }, - "endpoint_uuid":{ - "uuid":"P1" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON1" - } - }, - "endpoint_uuid":{ - "uuid":"OTP3" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"OTP2.1->MG-ON3" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.1" - } - }, - "endpoint_uuid":{ - "uuid":"P1" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON3" - } - }, - "endpoint_uuid":{ - "uuid":"OTP1" - } - } 
- ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"OTP2.2->MG-ON3" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.2" - } - }, - "endpoint_uuid":{ - "uuid":"P1" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON3" - } - }, - "endpoint_uuid":{ - "uuid":"OTP2" - } - } - ] - }, - { - "link_id":{ - "link_uuid":{ - "uuid":"OTP2.3->MG-ON3" - } - }, - "link_endpoint_ids":[ - { - "device_id":{ - "device_uuid":{ - "uuid":"OTP2.3" - } - }, - "endpoint_uuid":{ - "uuid":"P1" - } - }, - { - "device_id":{ - "device_uuid":{ - "uuid":"MG-ON3" - } - }, - "endpoint_uuid":{ - "uuid":"OTP3" - } - } - ] - } - ] - } - \ No newline at end of file diff --git a/src/tests/ecoc24/dump_logs.sh b/src/tests/ecoc24/dump_logs.sh deleted file mode 100755 index 48ac8e9a0fc070b3e022ba4a665fad22aa8a7313..0000000000000000000000000000000000000000 --- a/src/tests/ecoc24/dump_logs.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -rm -rf tmp/exec - -echo "Collecting logs for E2E..." -mkdir -p tmp/exec/e2e -kubectl --namespace tfs-e2e logs deployments/contextservice server > tmp/exec/e2e/context.log -kubectl --namespace tfs-e2e logs deployments/deviceservice server > tmp/exec/e2e/device.log -kubectl --namespace tfs-e2e logs deployments/serviceservice server > tmp/exec/e2e/service.log -kubectl --namespace tfs-e2e logs deployments/pathcompservice frontend > tmp/exec/e2e/pathcomp-frontend.log -kubectl --namespace tfs-e2e logs deployments/pathcompservice backend > tmp/exec/e2e/pathcomp-backend.log -kubectl --namespace tfs-e2e logs deployments/sliceservice server > tmp/exec/e2e/slice.log -printf "\n" - -echo "Collecting logs for IP..." -mkdir -p tmp/exec/ip -kubectl --namespace tfs-ip logs deployments/contextservice server > tmp/exec/ip/context.log -kubectl --namespace tfs-ip logs deployments/deviceservice server > tmp/exec/ip/device.log -kubectl --namespace tfs-ip logs deployments/serviceservice server > tmp/exec/ip/service.log -kubectl --namespace tfs-ip logs deployments/pathcompservice frontend > tmp/exec/ip/pathcomp-frontend.log -kubectl --namespace tfs-ip logs deployments/pathcompservice backend > tmp/exec/ip/pathcomp-backend.log -kubectl --namespace tfs-ip logs deployments/sliceservice server > tmp/exec/ip/slice.log -printf "\n" - -echo "Done!" diff --git a/src/tests/eucnc24/Dockerfile b/src/tests/eucnc24/Dockerfile index 3235c74cf00876359ef2e672f4170058f64894c8..2bf88714e09ca51060626c24c38e0c85628bccbf 100644 --- a/src/tests/eucnc24/Dockerfile +++ b/src/tests/eucnc24/Dockerfile @@ -71,6 +71,8 @@ COPY src/service/__init__.py service/__init__.py COPY src/service/client/. service/client/ COPY src/slice/__init__.py slice/__init__.py COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. 
vnt_manager/client/ COPY src/tests/*.py ./tests/ COPY src/tests/eucnc24/__init__.py ./tests/eucnc24/__init__.py COPY src/tests/eucnc24/data/. ./tests/eucnc24/data/ diff --git a/src/tests/ofc22/Dockerfile b/src/tests/ofc22/Dockerfile index a06f66fd0e954999722832cd7757b520a9141467..6e4ca36ecdd7033b178a6b02349316dc44b78ca1 100644 --- a/src/tests/ofc22/Dockerfile +++ b/src/tests/ofc22/Dockerfile @@ -78,6 +78,8 @@ COPY src/service/__init__.py service/__init__.py COPY src/service/client/. service/client/ COPY src/slice/__init__.py slice/__init__.py COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ COPY src/tests/*.py ./tests/ COPY src/tests/ofc22/__init__.py ./tests/ofc22/__init__.py COPY src/tests/ofc22/descriptors_emulated.json ./tests/ofc22/descriptors_emulated.json diff --git a/src/tests/ofc24/Dockerfile b/src/tests/ofc24/Dockerfile index 4e4e9f9e91f3a681fa6951aaa6f257746b98ebb1..f2afcfbb728cd6baa3b4b8640a26a6c4cfc7401f 100644 --- a/src/tests/ofc24/Dockerfile +++ b/src/tests/ofc24/Dockerfile @@ -78,6 +78,8 @@ COPY src/service/__init__.py service/__init__.py COPY src/service/client/. service/client/ COPY src/slice/__init__.py slice/__init__.py COPY src/slice/client/. slice/client/ +COPY src/vnt_manager/__init__.py vnt_manager/__init__.py +COPY src/vnt_manager/client/. vnt_manager/client/ COPY src/tests/*.py ./tests/ COPY src/tests/ofc24/__init__.py ./tests/ofc24/__init__.py COPY src/tests/ofc24/descriptors/topology.json ./tests/ofc24/descriptors/topology.json diff --git a/src/tests/ecoc24/.gitlab-ci.yml b/src/tests/ofc25/.gitlab-ci.yml similarity index 93% rename from src/tests/ecoc24/.gitlab-ci.yml rename to src/tests/ofc25/.gitlab-ci.yml index 8e3f7e71a1d33b51da985b883b47bd52c414b9ab..52ea2e3a45fc15bf5655c92683db3c21e1563ec4 100644 --- a/src/tests/ecoc24/.gitlab-ci.yml +++ b/src/tests/ofc25/.gitlab-ci.yml @@ -13,9 +13,9 @@ # limitations under the License. 
 
 # Build, tag, and push the Docker image to the GitLab Docker registry
-build ecoc24:
+build ofc25:
   variables:
-    TEST_NAME: 'ecoc24'
+    TEST_NAME: 'ofc25'
   stage: build
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
@@ -36,13 +36,13 @@ build ecoc24:
     - .gitlab-ci.yml
 
 # Deploy TeraFlowSDN and Execute end-2-end test
-end2end_test ecoc24:
+end2end_test ofc25:
   variables:
-    TEST_NAME: 'ecoc24'
+    TEST_NAME: 'ofc25'
   stage: end2end_test
 
   # Disable to force running it after all other tasks
   #needs:
-  #  - build ecoc24
+  #  - build ofc25
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
    - docker network rm -f na-br
@@ -85,11 +85,11 @@ end2end_test ecoc24:
 
    # Delete secondary ingress controllers
-    - kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-opt.yaml --ignore-not-found
+    - kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml --ignore-not-found
    # Create secondary ingress controllers
-    - kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-opt.yaml
+    - kubectl apply -f src/tests/ofc25/nginx-ingress-controller-opt.yaml
 
    # Deploy TFS for OPT
-    - source src/tests/ecoc24/deploy_specs_opt.sh
+    - source src/tests/ofc25/deploy_specs_opt.sh
 
    # Change the name for the database
    - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
@@ -115,13 +115,13 @@ end2end_test ecoc24:
 
    # Deploy IP TeraFlowSDN
    # Delete secondary ingress controllers
-    - kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-ip.yaml --ignore-not-found
+    - kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml --ignore-not-found
 
    # Create secondary ingress controllers
-    - kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-ip.yaml
+    - kubectl apply -f src/tests/ofc25/nginx-ingress-controller-ip.yaml
 
    # Deploy TFS for IP
-    - source src/tests/ecoc24/deploy_specs_ip.sh
+    - source src/tests/ofc25/deploy_specs_ip.sh
 
    # Change the name for the database
    - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
@@ -151,10 +151,10 @@ end2end_test ecoc24:
 
    # Delete secondary ingress controllers
-    - kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-e2e.yaml --ignore-not-found
+    - kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml --ignore-not-found
 
    # Create secondary ingress controllers
-    - kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-e2e.yaml
+    - kubectl apply -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml
 
    # Change the name for the database
    - cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
@@ -175,7 +175,7 @@ end2end_test ecoc24:
    - mv manifests/contextservice.yaml.bak manifests/contextservice.yaml
 
    #Configure Subscription WS
-    - ./src/tests/ecoc24/subscription_ws_e2e.sh
+    - ./src/tests/ofc25/subscription_ws_e2e.sh
 
    - mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh
diff --git a/src/tests/ecoc24/Dockerfile b/src/tests/ofc25/Dockerfile
similarity index 72%
rename from src/tests/ecoc24/Dockerfile
rename to src/tests/ofc25/Dockerfile
index 8fca59f57680e33275f9a02c21f8b0693def8ddd..0439d980bf50c7d3ee01ab6d1ab852b91a72cae6 100644
--- a/src/tests/ecoc24/Dockerfile
+++ b/src/tests/ofc25/Dockerfile
@@ -49,9 +49,9 @@ RUN rm *.proto
 RUN find . -type f -exec sed -i -E 's/(import\ .*)_pb2/from . \1_pb2/g' {} \;
 
 # Create component sub-folders, get specific Python packages
-RUN mkdir -p /var/teraflow/tests/ecoc24
-WORKDIR /var/teraflow/tests/ecoc24
-COPY src/tests/ofc24/requirements.in requirements.in
+RUN mkdir -p /var/teraflow/tests/ofc25
+WORKDIR /var/teraflow/tests/ofc25
+COPY src/tests/ofc25/requirements.in requirements.in
 RUN pip-compile --quiet --output-file=requirements.txt requirements.in
 RUN python3 -m pip install -r requirements.txt
@@ -73,24 +73,26 @@ COPY src/service/__init__.py service/__init__.py
 COPY src/service/client/. service/client/
 COPY src/slice/__init__.py slice/__init__.py
 COPY src/slice/client/. slice/client/
+COPY src/vnt_manager/__init__.py vnt_manager/__init__.py
+COPY src/vnt_manager/client/. vnt_manager/client/
 COPY src/tests/*.py ./tests/
-COPY src/tests/ecoc24/__init__.py ./tests/ecoc24/__init__.py
-COPY src/tests/ecoc24/descriptors/descriptor_ip.json ./tests/ecoc24/descriptors/descriptor_ip.json
-COPY src/tests/ecoc24/descriptors/descriptor_opt.json ./tests/ecoc24/descriptors/descriptor_opt.json
-COPY src/tests/ecoc24/descriptors/descriptor_e2e.json ./tests/ecoc24/descriptors/descriptor_e2e.json
-COPY src/tests/ecoc24/tests/. ./tests/ecoc24/tests/
+COPY src/tests/ofc25/__init__.py ./tests/ofc25/__init__.py
+COPY src/tests/ofc25/descriptors/descriptor_ip.json ./tests/ofc25/descriptors/descriptor_ip.json
+COPY src/tests/ofc25/descriptors/descriptor_opt.json ./tests/ofc25/descriptors/descriptor_opt.json
+COPY src/tests/ofc25/descriptors/descriptor_e2e.json ./tests/ofc25/descriptors/descriptor_e2e.json
+COPY src/tests/ofc25/tests/. ./tests/ofc25/tests/
 
 RUN tee ./run_tests.sh <<EOF
 #!/bin/bash
 source /var/teraflow/tfs_runtime_env_vars.sh
 export PYTHONPATH=/var/teraflow
-pytest --verbose --log-level=INFO /var/teraflow/tests/ecoc24/tests/test_functional_bootstrap_opt.py --junitxml=/opt/results/report_bootstrap_opt.xml
-pytest --verbose --log-level=INFO /var/teraflow/tests/ecoc24/tests/test_functional_bootstrap_ip.py --junitxml=/opt/results/report_bootstrap_ip.xml
+pytest --verbose --log-level=INFO /var/teraflow/tests/ofc25/tests/test_functional_bootstrap_opt.py --junitxml=/opt/results/report_bootstrap_opt.xml
+pytest --verbose --log-level=INFO /var/teraflow/tests/ofc25/tests/test_functional_bootstrap_ip.py --junitxml=/opt/results/report_bootstrap_ip.xml
 sleep 5
-pytest --verbose --log-level=INFO /var/teraflow/tests/ecoc24/tests/test_functional_bootstrap_e2e.py --junitxml=/opt/results/report_bootstrap_e2e.xml
-pytest --verbose --log-level=INFO /var/teraflow/tests/ofc24/tests/test_functional_create_service.py --junitxml=/opt/results/report_create_service.xml
-pytest --verbose --log-level=INFO /var/teraflow/tests/ofc24/tests/test_functional_delete_service.py --junitxml=/opt/results/report_delete_service.xml
-pytest --verbose --log-level=INFO /var/teraflow/tests/ofc24/tests/test_functional_cleanup.py --junitxml=/opt/results/report_cleanup.xml
+pytest --verbose --log-level=INFO /var/teraflow/tests/ofc25/tests/test_functional_bootstrap_e2e.py --junitxml=/opt/results/report_bootstrap_e2e.xml
+pytest --verbose --log-level=INFO /var/teraflow/tests/ofc25/tests/test_functional_create_service.py --junitxml=/opt/results/report_create_service.xml
+pytest --verbose --log-level=INFO /var/teraflow/tests/ofc25/tests/test_functional_delete_service.py --junitxml=/opt/results/report_delete_service.xml
+pytest --verbose --log-level=INFO /var/teraflow/tests/ofc25/tests/test_functional_cleanup.py --junitxml=/opt/results/report_cleanup.xml
 EOF
 RUN chmod ug+x ./run_tests.sh
diff --git a/src/tests/ecoc24/__init__.py b/src/tests/ofc25/__init__.py
similarity index 100%
rename from src/tests/ecoc24/__init__.py
rename to src/tests/ofc25/__init__.py
diff --git a/src/tests/ecoc24/deploy_e2e.sh b/src/tests/ofc25/_old/deploy_e2e.sh
similarity index 84%
rename from src/tests/ecoc24/deploy_e2e.sh
rename to src/tests/ofc25/_old/deploy_e2e.sh
index cbfcfdc21871b82c862f85dab1cefc136f2eb253..943487ae5751f487047ad5d96a550ee08068071d 100755
--- a/src/tests/ecoc24/deploy_e2e.sh
+++ b/src/tests/ofc25/_old/deploy_e2e.sh
@@ -18,13 +18,13 @@ kubectl delete namespace tfs-e2e
 
 # Delete secondary ingress controllers
-kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-e2e.yaml
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml
 
 # Create secondary ingress controllers
-kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-e2e.yaml
+kubectl apply -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml
 
 # Deploy TFS for E2E
-source src/tests/ecoc24/deploy_specs_e2e.sh
+source src/tests/ofc25/deploy_specs_e2e.sh
 
 # Change the name for the database
 cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
@@ -33,6 +33,6 @@ sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifest
 mv manifests/contextservice.yaml.bak manifests/contextservice.yaml
 
 #Configure Subscription WS
-./src/tests/ecoc24/subscription_ws_e2e.sh
+./src/tests/ofc25/subscription_ws_e2e.sh
 
 mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh
diff --git a/src/tests/ecoc24/deploy_ip.sh b/src/tests/ofc25/_old/deploy_ip.sh
similarity index 84%
rename from src/tests/ecoc24/deploy_ip.sh
rename to src/tests/ofc25/_old/deploy_ip.sh
index 694b246980db904162e8c4b6cbd90dc294a9f19b..7cd1459461893e3f8bd1d376eb70f5c282977890 100755
--- a/src/tests/ecoc24/deploy_ip.sh
+++ b/src/tests/ofc25/_old/deploy_ip.sh
@@ -18,13 +18,13 @@ kubectl delete namespace tfs-ip
 
 # Delete secondary ingress controllers
-kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-ip.yaml
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml
 
 # Create secondary ingress controllers
-kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-ip.yaml
+kubectl apply -f src/tests/ofc25/nginx-ingress-controller-ip.yaml
 
 # Deploy TFS for IP
-source src/tests/ecoc24/deploy_specs_ip.sh
+source src/tests/ofc25/deploy_specs_ip.sh
 
 # Change the name for the database
 cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
@@ -33,6 +33,6 @@ sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_ip_context"/}' manifests
 mv manifests/contextservice.yaml.bak manifests/contextservice.yaml
 
 #Configure Subscription WS
-./src/tests/ecoc24/subscription_ws_ip.sh
+./src/tests/ofc25/subscription_ws_ip.sh
 
 mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_ip.sh
diff --git a/src/tests/ecoc24/deploy_opt.sh b/src/tests/ofc25/_old/deploy_opt.sh
similarity index 86%
rename from src/tests/ecoc24/deploy_opt.sh
rename to src/tests/ofc25/_old/deploy_opt.sh
index c85b6e08bd9a6b3c9ed00decf022e27012783b8e..a38645e41c7929eebe10c8b36420a7f74342f6fc 100755
--- a/src/tests/ecoc24/deploy_opt.sh
+++ b/src/tests/ofc25/_old/deploy_opt.sh
@@ -18,13 +18,13 @@ kubectl delete namespace tfs-opt
 
 # Delete secondary ingress controllers
-kubectl delete -f src/tests/ecoc24/nginx-ingress-controller-opt.yaml
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml
 
 # Create secondary ingress controllers
-kubectl apply -f src/tests/ecoc24/nginx-ingress-controller-opt.yaml
+kubectl apply -f src/tests/ofc25/nginx-ingress-controller-opt.yaml
 
 # Deploy TFS for OPT
-source src/tests/ecoc24/deploy_specs_opt.sh
+source src/tests/ofc25/deploy_specs_opt.sh
 
 # Change the name for the database
 cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
diff --git a/src/tests/ecoc24/subscription_ws_e2e.sh b/src/tests/ofc25/_old/subscription_ws_e2e.sh
similarity index 100%
rename from src/tests/ecoc24/subscription_ws_e2e.sh
rename to src/tests/ofc25/_old/subscription_ws_e2e.sh
diff --git a/src/tests/ecoc24/subscription_ws_ip.sh b/src/tests/ofc25/_old/subscription_ws_ip.sh
similarity index 100%
rename from src/tests/ecoc24/subscription_ws_ip.sh
rename to src/tests/ofc25/_old/subscription_ws_ip.sh
diff --git a/src/tests/ofc25/deploy.sh b/src/tests/ofc25/deploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..8bd0b898920123892fd62b65ade2c3f168591abc
--- /dev/null
+++ b/src/tests/ofc25/deploy.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ===== Check Microk8s is ready ==============================
+#microk8s status --wait-ready
+#kubectl get pods --all-namespaces
+
+# ===== Cleanup old deployments ==============================
+#helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || true
+#helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || true
+#helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || true
+#helm3 uninstall --namespace nats nats 2>/dev/null || true
+#kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e --ignore-not-found
+#kubectl delete namespaces qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found
+#kubectl delete namespaces kafka kafka-ip kafka-opt kafka-e2e --ignore-not-found
+#kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found
+#kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml --ignore-not-found
+#kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml --ignore-not-found
+#kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml --ignore-not-found
+#sleep 5
+
+# ===== Check Microk8s is ready ==============================
+#microk8s status --wait-ready
+#kubectl get pods --all-namespaces
+
+# Configure TeraFlowSDN deployment
+# Uncomment if DEBUG log level is needed for the components
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/contextservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/deviceservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="frontend").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/pathcompservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/serviceservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/sliceservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/nbiservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/e2eorchestratorservice.yaml
+#yq -i '((select(.kind=="Deployment").spec.template.spec.containers.[] | select(.name=="server").env.[]) | select(.name=="LOG_LEVEL").value) |= "DEBUG"' manifests/vntmservice.yaml
+
+# Create secondary ingress controllers
+#kubectl apply -f src/tests/ofc25/nginx-ingress-controller-opt.yaml
+#kubectl apply -f src/tests/ofc25/nginx-ingress-controller-ip.yaml
+#kubectl apply -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml
+
+cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
+
+# ===== Deploy Optical TeraFlowSDN ==============================
+source src/tests/ofc25/deploy_specs_opt.sh
+cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
+sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_opt_context"/}' manifests/contextservice.yaml
+
+./deploy/crdb.sh
+./deploy/nats.sh
+./deploy/kafka.sh
+#./deploy/qdb.sh
+#./deploy/expose_dashboard.sh
+./deploy/tfs.sh
+./deploy/show.sh
+
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_opt.sh
+
+
+# ===== Deploy IP TeraFlowSDN ==============================
+source src/tests/ofc25/deploy_specs_ip.sh
+cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
+sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_ip_context"/}' manifests/contextservice.yaml
+
+./deploy/crdb.sh
+./deploy/nats.sh
+./deploy/kafka.sh
+#./deploy/qdb.sh
+#./deploy/expose_dashboard.sh
+./deploy/tfs.sh
+./deploy/show.sh
+
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_ip.sh
+
+
+# ===== Deploy End-to-End TeraFlowSDN ====================
+source src/tests/ofc25/deploy_specs_e2e.sh
+cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
+sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_e2e_context"/}' manifests/contextservice.yaml
+
+./deploy/crdb.sh
+./deploy/nats.sh
+./deploy/kafka.sh
+#./deploy/qdb.sh
+#./deploy/expose_dashboard.sh
+./deploy/tfs.sh
+./deploy/show.sh
+
+mv tfs_runtime_env_vars.sh tfs_runtime_env_vars_e2e.sh
+
+
+# ===== Restore original manifest files =========================
+mv manifests/contextservice.yaml.bak manifests/contextservice.yaml
+
+
+# ===== Wait for the E2E Context NATS subscriber =========================
+echo "Waiting for E2E Context to have subscriber ready..."
+while ! kubectl --namespace tfs-e2e logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done
+kubectl --namespace tfs-e2e logs deployment/contextservice -c server
+
+
+# ===== Onboarding topologies =========================
+# TODO: pending...
+
+
+# ===== Configure subscriptions =========================
+# This should not be needed
+#./src/tests/ofc25/subscription_ws_ip.sh
+#./src/tests/ofc25/subscription_ws_e2e.sh
+
+echo "Done!"
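The three deployment blocks in the new deploy.sh above differ only in the instance suffix (opt, ip, e2e): each sources its own deploy_specs file, rewrites the CRDB_DATABASE value in manifests/contextservice.yaml so every instance gets its own CockroachDB database, runs the same deploy scripts, and renames the generated runtime env-vars file. A minimal Bash sketch of how those blocks could be folded into one helper; the function name deploy_tfs_instance and the loop are illustrative only and are not part of the patch:

#!/bin/bash
# Illustrative sketch only; every command inside the function comes from
# the deploy.sh shown above, deploy_tfs_instance itself is hypothetical.
deploy_tfs_instance() {
    local inst=$1    # one of: opt, ip, e2e
    # Load per-instance deployment settings (namespace, Kafka, ingress, ...)
    source "src/tests/ofc25/deploy_specs_${inst}.sh"
    # Give each instance its own CockroachDB database name
    cp manifests/contextservice.yaml.bak manifests/contextservice.yaml
    sed -i '/name: CRDB_DATABASE/{n;s/value: .*/value: "tfs_'"${inst}"'_context"/}' manifests/contextservice.yaml
    ./deploy/crdb.sh
    ./deploy/nats.sh
    ./deploy/kafka.sh
    ./deploy/tfs.sh
    ./deploy/show.sh
    # Keep the generated runtime variables of this instance
    mv tfs_runtime_env_vars.sh "tfs_runtime_env_vars_${inst}.sh"
}

cp manifests/contextservice.yaml manifests/contextservice.yaml.bak
for inst in opt ip e2e; do deploy_tfs_instance "${inst}"; done
mv manifests/contextservice.yaml.bak manifests/contextservice.yaml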
diff --git a/src/tests/ecoc24/deploy_specs_e2e.sh b/src/tests/ofc25/deploy_specs_e2e.sh
similarity index 98%
rename from src/tests/ecoc24/deploy_specs_e2e.sh
rename to src/tests/ofc25/deploy_specs_e2e.sh
index 2792f9fd33df501d6ce10fde7938dc5898e59b46..e276410df1d34ed7a41d4f7bc1337b35ba125e7e 100755
--- a/src/tests/ecoc24/deploy_specs_e2e.sh
+++ b/src/tests/ofc25/deploy_specs_e2e.sh
@@ -97,7 +97,7 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs-e2e"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="src/tests/ecoc24/tfs-ingress-e2e.yaml"
+export TFS_EXTRA_MANIFESTS="src/tests/ofc25/tfs-ingress-e2e.yaml"
 
 # Uncomment to monitor performance of components
 #export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
@@ -134,7 +134,7 @@ export CRDB_PASSWORD="tfs123"
 export CRDB_DEPLOY_MODE="single"
 
 # Disable flag for dropping database, if it exists.
-export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
@@ -204,7 +204,7 @@ export GRAF_EXT_PORT_HTTP="3000"
 # ----- Apache Kafka -----------------------------------------------------------
 
 # Set the namespace where Apache Kafka will be deployed.
-export KFK_NAMESPACE="kafka"
+export KFK_NAMESPACE="kafka-e2e"
 
 # Set the port Apache Kafka server will be exposed to.
 export KFK_SERVER_PORT="9092"
diff --git a/src/tests/ecoc24/deploy_specs_ip.sh b/src/tests/ofc25/deploy_specs_ip.sh
similarity index 98%
rename from src/tests/ecoc24/deploy_specs_ip.sh
rename to src/tests/ofc25/deploy_specs_ip.sh
index 1e105557b147956667612d7d51f54b3052fe405a..247b9353c9905f965b00202ad6cbc2db305d25da 100755
--- a/src/tests/ecoc24/deploy_specs_ip.sh
+++ b/src/tests/ofc25/deploy_specs_ip.sh
@@ -97,7 +97,7 @@ export TFS_IMAGE_TAG="dev"
 export TFS_K8S_NAMESPACE="tfs-ip"
 
 # Set additional manifest files to be applied after the deployment
-export TFS_EXTRA_MANIFESTS="src/tests/ecoc24/tfs-ingress-ip.yaml"
+export TFS_EXTRA_MANIFESTS="src/tests/ofc25/tfs-ingress-ip.yaml"
 
 # Uncomment to monitor performance of components
 #export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml"
@@ -134,7 +134,7 @@ export CRDB_PASSWORD="tfs123"
 export CRDB_DEPLOY_MODE="single"
 
 # Disable flag for dropping database, if it exists.
-export CRDB_DROP_DATABASE_IF_EXISTS="YES"
+export CRDB_DROP_DATABASE_IF_EXISTS=""
 
 # Disable flag for re-deploying CockroachDB from scratch.
 export CRDB_REDEPLOY=""
@@ -204,7 +204,7 @@ export GRAF_EXT_PORT_HTTP="3000"
 # ----- Apache Kafka -----------------------------------------------------------
 
 # Set the namespace where Apache Kafka will be deployed.
-export KFK_NAMESPACE="kafka"
+export KFK_NAMESPACE="kafka-ip"
 
 # Set the port Apache Kafka server will be exposed to.
export KFK_SERVER_PORT="9092" diff --git a/src/tests/ecoc24/deploy_specs_opt.sh b/src/tests/ofc25/deploy_specs_opt.sh similarity index 98% rename from src/tests/ecoc24/deploy_specs_opt.sh rename to src/tests/ofc25/deploy_specs_opt.sh index 5c24678311877ad077c664c64354c9bffdfe5b58..3535f50fe46a66e39369b7eaf094525e4fd66392 100755 --- a/src/tests/ecoc24/deploy_specs_opt.sh +++ b/src/tests/ofc25/deploy_specs_opt.sh @@ -97,7 +97,7 @@ export TFS_IMAGE_TAG="dev" export TFS_K8S_NAMESPACE="tfs-opt" # Set additional manifest files to be applied after the deployment -export TFS_EXTRA_MANIFESTS="src/tests/ecoc24/tfs-ingress-opt.yaml" +export TFS_EXTRA_MANIFESTS="src/tests/ofc25/tfs-ingress-opt.yaml" # Uncomment to monitor performance of components #export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" @@ -204,7 +204,7 @@ export GRAF_EXT_PORT_HTTP="3000" # ----- Apache Kafka ----------------------------------------------------------- # Set the namespace where Apache Kafka will be deployed. -export KFK_NAMESPACE="kafka" +export KFK_NAMESPACE="kafka-opt" # Set the port Apache Kafka server will be exposed to. export KFK_SERVER_PORT="9092" diff --git a/src/tests/ofc25/descriptors/create-vlink-01.json b/src/tests/ofc25/descriptors/create-vlink-01.json new file mode 100644 index 0000000000000000000000000000000000000000..edb9bc61ca6c4a13a290d381107f7eeb6abdbb78 --- /dev/null +++ b/src/tests/ofc25/descriptors/create-vlink-01.json @@ -0,0 +1,13 @@ +{ + "links": [ + { + "link_id": {"link_uuid": {"uuid": "IP1/PORT-xe1==IP2/PORT-xe1"}}, + "link_type": "LINKTYPE_VIRTUAL", + "attributes": {"total_capacity_gbps": 800.0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe1"}}, + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe1"}} + ] + } + ] +} diff --git a/src/tests/ofc25/descriptors/create-vlink-02.json b/src/tests/ofc25/descriptors/create-vlink-02.json new file mode 100644 index 0000000000000000000000000000000000000000..63b141a2d85dbe42ca4da70a359f1f76bc55ff30 --- /dev/null +++ b/src/tests/ofc25/descriptors/create-vlink-02.json @@ -0,0 +1,13 @@ +{ + "links": [ + { + "link_id": {"link_uuid": {"uuid": "IP1/PORT-xe2==IP2/PORT-xe2"}}, + "link_type": "LINKTYPE_VIRTUAL", + "attributes": {"total_capacity_gbps": 800.0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe2"}} + ] + } + ] +} diff --git a/src/tests/ofc25/descriptors/create-vlink-03.json b/src/tests/ofc25/descriptors/create-vlink-03.json new file mode 100644 index 0000000000000000000000000000000000000000..1f57c5ce40e1e631c9a3a58a94e7fc89939f4086 --- /dev/null +++ b/src/tests/ofc25/descriptors/create-vlink-03.json @@ -0,0 +1,13 @@ +{ + "links": [ + { + "link_id": {"link_uuid": {"uuid": "IP1/PORT-xe3==IP2/PORT-xe3"}}, + "link_type": "LINKTYPE_VIRTUAL", + "attributes": {"total_capacity_gbps": 800.0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe3"}} + ] + } + ] +} diff --git a/src/tests/ofc25/descriptors/old/topology_e2e.json b/src/tests/ofc25/descriptors/old/topology_e2e.json new file mode 100644 index 0000000000000000000000000000000000000000..a8adf32ea6744b665f186eca22600044f01e7c4f --- /dev/null +++ 
b/src/tests/ofc25/descriptors/old/topology_e2e.json @@ -0,0 +1,85 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "TFS-PACKET"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_OPTICAL_TFS"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8003"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology" + }}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "IP1-T1.1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe1"}}, + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP1-T1.2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, + {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP1-T1.3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, + {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + + {"link_id": {"link_uuid": {"uuid": "IP2-TP2.1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe1"}}, + {"device_id": {"device_uuid": {"uuid": "TP2.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP2-TP2.2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, + {"device_id": {"device_uuid": {"uuid": "TP2.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP2-TP2.3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, + {"device_id": {"device_uuid": {"uuid": "TP2.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + + {"link_id": {"link_uuid": {"uuid": "T1.1-IP1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T1.2-IP1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": 
{"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T1.3-IP1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP1"}}, "endpoint_uuid": {"uuid": "PORT-xe3"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "TP2.1-IP2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP2.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP2.2-IP2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP2.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP2.3-IP2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP2.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP2"}}, "endpoint_uuid": {"uuid": "PORT-xe3"}} + ]} + ] +} diff --git a/src/tests/ofc25/descriptors/old/topology_opt.json b/src/tests/ofc25/descriptors/old/topology_opt.json new file mode 100644 index 0000000000000000000000000000000000000000..dffa68ed2766497cf727164fd3d1f602a306f870 --- /dev/null +++ b/src/tests/ofc25/descriptors/old/topology_opt.json @@ -0,0 +1,192 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "TP1.1"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "LINE", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "TP1.2"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "LINE", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "TP1.3"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": 
{"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "LINE", "type": "optical/channel"} + ]}}} + ]} + }, + + { + "device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "device_type": "emu-optical-roadm", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "ADP1", "type": "optical/add-drop"}, + {"uuid": "ADP2", "type": "optical/add-drop"}, + {"uuid": "ADP3", "type": "optical/add-drop"}, + {"uuid": "LINE2", "type": "optical/line" } + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "MG-ON2"}}, "device_type": "emu-optical-roadm", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "LINE1", "type": "optical/line"}, + {"uuid": "LINE3", "type": "optical/line"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "device_type": "emu-optical-roadm", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "ADP1", "type": "optical/add-drop"}, + {"uuid": "ADP2", "type": "optical/add-drop"}, + {"uuid": "ADP3", "type": "optical/add-drop"}, + {"uuid": "LINE2", "type": "optical/line" } + ]}}} + ]} + }, + + { + "device_id": {"device_uuid": {"uuid": "TP2.1"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "LINE", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "TP2.2"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", 
"resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "LINE", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "TP2.3"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "LINE", "type": "optical/channel"} + ]}}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "MG-ON1->MG-ON2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "LINE2"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON2"}}, "endpoint_uuid": {"uuid": "LINE1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON2->MG-ON1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON2"}}, "endpoint_uuid": {"uuid": "LINE1"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "LINE2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON2->MG-ON3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON2"}}, "endpoint_uuid": {"uuid": "LINE3"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "LINE2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON3->MG-ON2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "LINE2"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON2"}}, "endpoint_uuid": {"uuid": "LINE3"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "TP1.1->MG-ON1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP1.1" }}, "endpoint_uuid": {"uuid": "LINE"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "ADP1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP1.2->MG-ON1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP1.2" }}, "endpoint_uuid": {"uuid": "LINE"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "ADP2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP1.3->MG-ON1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP1.3" }}, "endpoint_uuid": {"uuid": "LINE"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "ADP3"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP2.1->MG-ON3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP2.1" }}, "endpoint_uuid": {"uuid": "LINE"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "ADP1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP2.2->MG-ON3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP2.2" }}, "endpoint_uuid": {"uuid": "LINE"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "ADP2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "TP2.3->MG-ON3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "TP2.3" }}, "endpoint_uuid": {"uuid": "LINE"}}, + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, 
"endpoint_uuid": {"uuid": "ADP3"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "MG-ON1->TP1.1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "ADP1"}}, + {"device_id": {"device_uuid": {"uuid": "TP1.1" }}, "endpoint_uuid": {"uuid": "LINE"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON1->TP1.2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "ADP2"}}, + {"device_id": {"device_uuid": {"uuid": "TP1.2" }}, "endpoint_uuid": {"uuid": "LINE"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON1->TP1.3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON1"}}, "endpoint_uuid": {"uuid": "ADP3"}}, + {"device_id": {"device_uuid": {"uuid": "TP1.3" }}, "endpoint_uuid": {"uuid": "LINE"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON3->TP2.1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "ADP1"}}, + {"device_id": {"device_uuid": {"uuid": "TP2.1" }}, "endpoint_uuid": {"uuid": "LINE"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON3->TP2.2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "ADP2"}}, + {"device_id": {"device_uuid": {"uuid": "TP2.2" }}, "endpoint_uuid": {"uuid": "LINE"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "MG-ON3->TP2.3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MG-ON3"}}, "endpoint_uuid": {"uuid": "ADP3"}}, + {"device_id": {"device_uuid": {"uuid": "TP2.3" }}, "endpoint_uuid": {"uuid": "LINE"}} + ]} + ] +} diff --git a/src/tests/ofc25/descriptors/topology_e2e.json b/src/tests/ofc25/descriptors/topology_e2e.json new file mode 100644 index 0000000000000000000000000000000000000000..4d721861f67e8bbb1a073f6c9f9ac597d6c82aa5 --- /dev/null +++ b/src/tests/ofc25/descriptors/topology_e2e.json @@ -0,0 +1,85 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "TFS-PACKET"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8002"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "TFS-OPTICAL"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_OPTICAL_TFS"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}}, + {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "8003"}}, + {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "import_topology": "topology" + }}} + ]} + } + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "IP1-T1.1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": 
"PORT-xe1"}}, + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP1-T1.2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, + {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP1-T1.3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, + {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + + {"link_id": {"link_uuid": {"uuid": "IP2-T2.1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}}, + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP2-T2.2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}}, + {"device_id": {"device_uuid": {"uuid": "T2.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + {"link_id": {"link_uuid": {"uuid": "IP2-T2.3"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}}, + {"device_id": {"device_uuid": {"uuid": "T2.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }} + ]}, + + {"link_id": {"link_uuid": {"uuid": "T1.1-IP1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T1.2-IP1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T1.3-IP1"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP1" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}} + ]}, + + {"link_id": {"link_uuid": {"uuid": "T2.1-IP2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe1"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T2.2-IP2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2.2"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe2"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "T2.3-IP2"}}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2.3"}}, "endpoint_uuid": {"uuid": "CLIENT" }}, + {"device_id": {"device_uuid": {"uuid": "IP2" }}, "endpoint_uuid": {"uuid": "PORT-xe3"}} + ]} + ] +} diff --git a/src/tests/ofc25/descriptors/topology_ip.json b/src/tests/ofc25/descriptors/topology_ip.json new file mode 100644 index 0000000000000000000000000000000000000000..b75aeb7b1049c3a840b4e2bf9841dea9a55bd059 --- /dev/null +++ b/src/tests/ofc25/descriptors/topology_ip.json @@ -0,0 +1,36 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "IP1"}}, 
"device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "PORT-xe1", "type": "copper"}, + {"uuid": "PORT-xe2", "type": "copper"}, + {"uuid": "PORT-xe3", "type": "copper"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "IP2"}}, "device_type": "emu-packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "PORT-xe1", "type": "copper"}, + {"uuid": "PORT-xe2", "type": "copper"}, + {"uuid": "PORT-xe3", "type": "copper"} + ]}}} + ]} + } + ] +} diff --git a/src/tests/ofc25/descriptors/topology_opt.json b/src/tests/ofc25/descriptors/topology_opt.json new file mode 100644 index 0000000000000000000000000000000000000000..6b6d5f2602121827aa723c2ab42cf03878e25862 --- /dev/null +++ b/src/tests/ofc25/descriptors/topology_opt.json @@ -0,0 +1,552 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "T1.1"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "1", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "T1.2"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "2", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "T1.3"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", 
"custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "3", "type": "optical/channel"} + ]}}} + ]} + }, + + { + "device_id": {"device_uuid": {"uuid": "MGON1"}}, "device_type": "emu-optical-roadm", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "port-33-in", "type": "optical/client-add" }, + {"uuid": "port-33-out", "type": "optical/client-drop"}, + {"uuid": "port-34-in", "type": "optical/client-add" }, + {"uuid": "port-34-out", "type": "optical/client-drop"}, + {"uuid": "port-35-in", "type": "optical/client-add" }, + {"uuid": "port-35-out", "type": "optical/client-drop"}, + {"uuid": "port-9-in", "type": "optical/line-in" }, + {"uuid": "port-9-out", "type": "optical/line-out" } + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "MGON2"}}, "device_type": "emu-optical-roadm", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "port-1-in", "type": "optical/line-in" }, + {"uuid": "port-1-out", "type": "optical/line-out" }, + {"uuid": "port-9-in", "type": "optical/line-in" }, + {"uuid": "port-9-out", "type": "optical/line-out" } + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "MGON3"}}, "device_type": "emu-optical-roadm", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "port-33-in", "type": "optical/client-add" }, + {"uuid": "port-33-out", "type": "optical/client-drop"}, + {"uuid": "port-34-in", "type": "optical/client-add" }, + {"uuid": "port-34-out", "type": "optical/client-drop"}, + {"uuid": "port-35-in", "type": "optical/client-add" }, + {"uuid": "port-35-out", "type": "optical/client-drop"}, + {"uuid": "port-1-in", "type": "optical/line-in" }, + {"uuid": "port-1-out", "type": "optical/line-out" } + ]}}} + ]} + }, + + { + "device_id": {"device_uuid": {"uuid": "T2.1"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + 
{"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "1", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "T2.2"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "2", "type": "optical/channel"} + ]}}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "T2.3"}}, "device_type": "emu-optical-transponder", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], "device_operational_status": "DEVICEOPERATIONALSTATUS_UNDEFINED", + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "CLIENT", "type": "copper" }, + {"uuid": "3", "type": "optical/channel"} + ]}}} + ]} + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", "link_id": {"link_uuid": {"uuid": "T1.1->MGON1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1" }}, "endpoint_uuid": {"uuid": "1" }}, + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-33-in"}} + ], + "optical_details": { + "length": 0, "src_port": "1", "dst_port": "port-33-in", "local_peer_port": "1", "remote_peer_port": "port-33-out", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "T1.2-MGON1", "link_id": {"link_uuid": {"uuid": "T1.2->MGON1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.2"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "MGON1"}},"endpoint_uuid": {"uuid": "port-34-in"}} 
+ ], + "optical_details": { + "length": 0, "src_port": "2", "dst_port": "port-34-in", "local_peer_port": "2", "remote_peer_port": "port-34-out", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "T1.3-MGON1", "link_id": {"link_uuid": {"uuid": "T1.3->MGON1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.3"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "MGON1"}},"endpoint_uuid": {"uuid": "port-35-in"}} + ], + "optical_details": { + "length": 0, "src_port": "3", "dst_port": "port-35-in", "local_peer_port": "3", "remote_peer_port": "port-35-out", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON1-T1.1", "link_id": {"link_uuid": {"uuid": "MGON1->T1.1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-33-out"}}, + {"device_id": {"device_uuid": {"uuid": "T1.1"}},"endpoint_uuid": {"uuid": "1"}} + ], + "optical_details": { + "length": 0, "src_port": "port-33-out", "dst_port": "1", "local_peer_port": "port-33-in", "remote_peer_port": "1", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, 
"52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON1-T1.2", "link_id": {"link_uuid": {"uuid": "MGON1->T1.2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-34-out"}}, + {"device_id": {"device_uuid": {"uuid": "T1.2"}},"endpoint_uuid": {"uuid": "2"}} + ], + "optical_details": { + "length": 0, "src_port": "port-34-out", "dst_port": "2", "local_peer_port": "port-34-in", "remote_peer_port": "2", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON1-T1.3", "link_id": {"link_uuid": {"uuid": "MGON1->T1.3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-35-out"}}, + {"device_id": {"device_uuid": {"uuid": "T1.3"}},"endpoint_uuid": {"uuid": "3"}} + ], + "optical_details": { + "length": 0, "src_port": "port-35-out", "dst_port": "3", "local_peer_port": "port-35-in", "remote_peer_port": "3", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON1-MGON2", "link_id": {"link_uuid": {"uuid": "MGON1->MGON2"}}, + 
"link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON2"}},"endpoint_uuid": {"uuid": "port-1-in"}} + ], + "optical_details": { + "length": 0, "src_port": "port-9-out", "dst_port": "port-1-in", "local_peer_port": "port-9-in", "remote_peer_port": "port-1-out", + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON2-MGON1", "link_id": {"link_uuid": {"uuid": "MGON2->MGON1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON2"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON1"}},"endpoint_uuid": {"uuid": "port-9-in"}} + ], + "optical_details": { + "length": 0, "src_port": "port-1-out", "dst_port": "port-9-in", "local_peer_port": "port-1-in", "remote_peer_port": "port-9-out", + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON2-MGON3", "link_id": {"link_uuid": {"uuid": "MGON2->MGON3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON2"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON3"}},"endpoint_uuid": {"uuid": "port-1-in"}} + ], + "optical_details": { + "length": 0, "src_port": "port-9-out", "dst_port": "port-1-in", "local_peer_port": "port-9-in", "remote_peer_port": "port-1-out", + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 
1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON3-MGON2", "link_id": {"link_uuid": {"uuid": "MGON3->MGON2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON3"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON2"}},"endpoint_uuid": {"uuid": "port-9-in"}} + ], + "optical_details": { + "length": 0, "src_port": "port-1-out", "dst_port": "port-9-in", "local_peer_port": "port-1-in", "remote_peer_port": "port-9-out", + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "T2.1-MGON3", "link_id": {"link_uuid": {"uuid": "T2.1->MGON3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "MGON3"}},"endpoint_uuid": {"uuid": "port-33-in"}} + ], + "optical_details": { + "length": 0, "src_port": "1", "dst_port": "port-33-in", "local_peer_port": "1", "remote_peer_port": "port-33-out", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, 
"509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "T2.2-MGON3", "link_id": {"link_uuid": {"uuid": "T2.2->MGON3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2.2"}}, "endpoint_uuid": {"uuid": "2"}}, + {"device_id": {"device_uuid": {"uuid": "MGON3"}},"endpoint_uuid": {"uuid": "port-34-in"}} + ], + "optical_details": { + "length": 0, "src_port": "2", "dst_port": "port-34-in", "local_peer_port": "2", "remote_peer_port": "port-34-out", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "T2.3-MGON3", "link_id": {"link_uuid": {"uuid": "T2.3->MGON3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T2.3"}}, "endpoint_uuid": {"uuid": "3"}}, + {"device_id": {"device_uuid": {"uuid": "MGON3"}},"endpoint_uuid": {"uuid": "port-35-in"}} + ], + "optical_details": { + "length": 0, "src_port": "3", "dst_port": "port-35-in", "local_peer_port": "3", "remote_peer_port": "port-35-out", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON3-T2.1", "link_id": {"link_uuid": {"uuid": "MGON3->T2.1"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON3"}}, "endpoint_uuid": {"uuid": "port-33-out"}}, + {"device_id": {"device_uuid": {"uuid": "T2.1"}},"endpoint_uuid": {"uuid": "1"}} + ], + "optical_details": { + "length": 0, "src_port": "port-33-out", "dst_port": "1", "local_peer_port": "port-33-in", "remote_peer_port": "1", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, 
"7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON3-T2.2", "link_id": {"link_uuid": {"uuid": "MGON3->T2.2"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON3"}}, "endpoint_uuid": {"uuid": "port-34-out"}}, + {"device_id": {"device_uuid": {"uuid": "T2.2"}},"endpoint_uuid": {"uuid": "2"}} + ], + "optical_details": { + "length": 0, "src_port": "port-34-out", "dst_port": "2", "local_peer_port": "port-34-in", "remote_peer_port": "2", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, "113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + }, + { + "name": "MGON3-T2.3", "link_id": {"link_uuid": {"uuid": "MGON3->T2.3"}}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON3"}}, "endpoint_uuid": {"uuid": "port-35-out"}}, + {"device_id": {"device_uuid": {"uuid": "T2.3"}},"endpoint_uuid": {"uuid": "3"}} + ], + "optical_details": { + "length": 0, "src_port": "port-35-out", "dst_port": "3", "local_peer_port": "port-35-in", "remote_peer_port": "3", "used": false, + "c_slots": { + "1": 1, "2": 1, "3": 1, "4": 1, "5": 1, "6": 1, "7": 1, "8": 1, "9": 1, "10": 1, + "11": 1, "12": 1, "13": 1, "14": 1, "15": 1, "16": 1, "17": 1, "18": 1, "19": 1, "20": 1, + "21": 1, "22": 1, "23": 1, "24": 1, "25": 1, "26": 1, "27": 1, "28": 1, "29": 1, "30": 1, + "31": 1, "32": 1, "33": 1, "34": 1, "35": 1, "36": 1, "37": 1, "38": 1, "39": 1, "40": 1, + "41": 1, "42": 1, "43": 1, "44": 1, "45": 1, "46": 1, "47": 1, "48": 1, "49": 1, "50": 1, + "51": 1, "52": 1, "53": 1, "54": 1, "55": 1, "56": 1, "57": 1, "58": 1, "59": 1, "60": 1 + }, + "l_slots": { + "101": 1, "102": 1, "103": 1, "104": 1, "105": 1, "106": 1, "107": 1, "108": 1, "109": 1, "110": 1, + "111": 1, "112": 1, 
"113": 1, "114": 1, "115": 1, "116": 1, "117": 1, "118": 1, "119": 1, "120": 1 + }, + "s_slots": { + "501": 1, "502": 1, "503": 1, "504": 1, "505": 1, "506": 1, "507": 1, "508": 1, "509": 1, "510": 1, + "511": 1, "512": 1, "513": 1, "514": 1, "515": 1, "516": 1, "517": 1, "518": 1, "519": 1, "520": 1 + } + } + } + ] +} diff --git a/src/tests/ofc25/dump-logs.sh b/src/tests/ofc25/dump-logs.sh new file mode 100755 index 0000000000000000000000000000000000000000..234537fba3c99c493f6057d2edf29482a83d10af --- /dev/null +++ b/src/tests/ofc25/dump-logs.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +rm logs -rf tmp/exec + +echo "Collecting logs for E2E..." +mkdir -p tmp/exec/e2e +kubectl logs --namespace tfs-e2e deployment/contextservice -c server > tmp/exec/e2e/context.log +kubectl logs --namespace tfs-e2e deployment/deviceservice -c server > tmp/exec/e2e/device.log +kubectl logs --namespace tfs-e2e deployment/serviceservice -c server > tmp/exec/e2e/service.log +kubectl logs --namespace tfs-e2e deployment/pathcompservice -c frontend > tmp/exec/e2e/pathcomp-frontend.log +kubectl logs --namespace tfs-e2e deployment/pathcompservice -c backend > tmp/exec/e2e/pathcomp-backend.log +kubectl logs --namespace tfs-e2e deployment/webuiservice -c server > tmp/exec/e2e/webui.log +kubectl logs --namespace tfs-e2e deployment/nbiservice -c server > tmp/exec/e2e/nbi.log +kubectl logs --namespace tfs-e2e deployment/e2e-orchestratorservice -c server > tmp/exec/e2e/e2eorch.log +printf logs "\n" + +echo "Collecting logs for IP..." +mkdir -p tmp/exec/ip +kubectl logs --namespace tfs-ip deployment/contextservice -c server > tmp/exec/ip/context.log +kubectl logs --namespace tfs-ip deployment/deviceservice -c server > tmp/exec/ip/device.log +kubectl logs --namespace tfs-ip deployment/serviceservice -c server > tmp/exec/ip/service.log +kubectl logs --namespace tfs-ip deployment/pathcompservice -c frontend > tmp/exec/ip/pathcomp-frontend.log +kubectl logs --namespace tfs-ip deployment/pathcompservice -c backend > tmp/exec/ip/pathcomp-backend.log +kubectl logs --namespace tfs-ip deployment/webuiservice -c server > tmp/exec/ip/webui.log +kubectl logs --namespace tfs-ip deployment/nbiservice -c server > tmp/exec/ip/nbi.log +kubectl logs --namespace tfs-ip deployment/vnt-managerservice -c server > tmp/exec/ip/vntm.log +printf logs "\n" + +echo "Collecting logs for OPT..." 
+mkdir -p tmp/exec/opt
+kubectl logs --namespace tfs-opt deployment/contextservice -c server > tmp/exec/opt/context.log
+kubectl logs --namespace tfs-opt deployment/deviceservice -c server > tmp/exec/opt/device.log
+kubectl logs --namespace tfs-opt deployment/serviceservice -c server > tmp/exec/opt/service.log
+kubectl logs --namespace tfs-opt deployment/pathcompservice -c frontend > tmp/exec/opt/pathcomp-frontend.log
+kubectl logs --namespace tfs-opt deployment/pathcompservice -c backend > tmp/exec/opt/pathcomp-backend.log
+kubectl logs --namespace tfs-opt deployment/webuiservice -c server > tmp/exec/opt/webui.log
+kubectl logs --namespace tfs-opt deployment/nbiservice -c server > tmp/exec/opt/nbi.log
+kubectl logs --namespace tfs-opt deployment/opticalcontrollerservice -c server > tmp/exec/opt/ctrl.log
+printf "\n"
+
+echo "Done!"
diff --git a/src/tests/ecoc24/nginx-ingress-controller-e2e.yaml b/src/tests/ofc25/nginx-ingress-controller-e2e.yaml
similarity index 100%
rename from src/tests/ecoc24/nginx-ingress-controller-e2e.yaml
rename to src/tests/ofc25/nginx-ingress-controller-e2e.yaml
diff --git a/src/tests/ecoc24/nginx-ingress-controller-ip.yaml b/src/tests/ofc25/nginx-ingress-controller-ip.yaml
similarity index 100%
rename from src/tests/ecoc24/nginx-ingress-controller-ip.yaml
rename to src/tests/ofc25/nginx-ingress-controller-ip.yaml
diff --git a/src/tests/ecoc24/nginx-ingress-controller-opt.yaml b/src/tests/ofc25/nginx-ingress-controller-opt.yaml
similarity index 100%
rename from src/tests/ecoc24/nginx-ingress-controller-opt.yaml
rename to src/tests/ofc25/nginx-ingress-controller-opt.yaml
diff --git a/src/tests/ecoc24/show_deploy.sh b/src/tests/ofc25/show_deploy.sh
similarity index 100%
rename from src/tests/ecoc24/show_deploy.sh
rename to src/tests/ofc25/show_deploy.sh
diff --git a/src/tests/ecoc24/tests/__init__.py b/src/tests/ofc25/tests/__init__.py
similarity index 100%
rename from src/tests/ecoc24/tests/__init__.py
rename to src/tests/ofc25/tests/__init__.py
diff --git a/src/tests/ecoc24/tests/create_service.py b/src/tests/ofc25/tests/create_service.py
similarity index 100%
rename from src/tests/ecoc24/tests/create_service.py
rename to src/tests/ofc25/tests/create_service.py
diff --git a/src/tests/ecoc24/tests/delete_service.py b/src/tests/ofc25/tests/delete_service.py
similarity index 100%
rename from src/tests/ecoc24/tests/delete_service.py
rename to src/tests/ofc25/tests/delete_service.py
diff --git a/src/tests/ecoc24/tests/test_functional_bootstrap_e2e.py b/src/tests/ofc25/tests/test_functional_bootstrap_e2e.py
similarity index 78%
rename from src/tests/ecoc24/tests/test_functional_bootstrap_e2e.py
rename to src/tests/ofc25/tests/test_functional_bootstrap_e2e.py
index 27648969b1610b01e25d21dc76bdec2279c314f1..54225ea3fe5c682636492e741a44746974502850 100644
--- a/src/tests/ecoc24/tests/test_functional_bootstrap_e2e.py
+++ b/src/tests/ofc25/tests/test_functional_bootstrap_e2e.py
@@ -16,6 +16,7 @@ import logging, os, time
 from common.Constants import DEFAULT_CONTEXT_NAME
 from common.proto.context_pb2 import ContextId, DeviceOperationalStatusEnum, Empty
 from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
 from common.tools.object_factory.Context import json_context_id
 from context.client.ContextClient import ContextClient
 from device.client.DeviceClient import DeviceClient
@@ -24,7 +25,7 @@ from tests.Fixtures import 
context_client, device_client # pylint: disable=unuse LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'descriptor_e2e.json') +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_e2e.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_scenario_bootstrap( @@ -50,7 +51,7 @@ def test_scenario_devices_enabled( """ This test validates that the devices are enabled. """ - """ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED num_devices = -1 num_devices_enabled, num_retry = 0, 0 @@ -59,9 +60,15 @@ def test_scenario_devices_enabled( response = context_client.ListDevices(Empty()) num_devices = len(response.devices) num_devices_enabled = 0 + disabled_devices = list() for device in response.devices: - if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue - num_devices_enabled += 1 + if device.device_operational_status == DEVICE_OP_STATUS_ENABLED: + num_devices_enabled += 1 + else: + disabled_devices.append(grpc_message_to_json(device)) LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) - num_retry += 1 """ - assert 1 == 1 + num_retry += 1 + if num_devices_enabled != num_devices: + LOGGER.info('Disabled Devices: {:s}'.format(str(disabled_devices))) + LOGGER.info('Devices: {:s}'.format(grpc_message_to_json_string(response))) + assert num_devices_enabled == num_devices diff --git a/src/tests/ecoc24/tests/test_functional_bootstrap_ip.py b/src/tests/ofc25/tests/test_functional_bootstrap_ip.py similarity index 91% rename from src/tests/ecoc24/tests/test_functional_bootstrap_ip.py rename to src/tests/ofc25/tests/test_functional_bootstrap_ip.py index 668213a56f00d850e0a6e94d1e18f7fa7b7f8a08..e4ace756f3906a6f91f830b7b15a8646711b2620 100644 --- a/src/tests/ecoc24/tests/test_functional_bootstrap_ip.py +++ b/src/tests/ofc25/tests/test_functional_bootstrap_ip.py @@ -24,7 +24,7 @@ from tests.Fixtures import context_client, device_client # pylint: disable=unuse LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'descriptor_ip.json') +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_ip.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_scenario_bootstrap( @@ -47,10 +47,7 @@ def test_scenario_bootstrap( def test_scenario_devices_enabled( context_client : ContextClient, # pylint: disable=redefined-outer-name ) -> None: - """ - This test validates that the devices are enabled. 
- """ - """ DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED + DEVICE_OP_STATUS_ENABLED = DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED num_devices = -1 num_devices_enabled, num_retry = 0, 0 @@ -63,5 +60,4 @@ def test_scenario_devices_enabled( if device.device_operational_status != DEVICE_OP_STATUS_ENABLED: continue num_devices_enabled += 1 LOGGER.info('Num Devices enabled: {:d}/{:d}'.format(num_devices_enabled, num_devices)) - num_retry += 1 """ - assert 1 == 1 + num_retry += 1 diff --git a/src/tests/ecoc24/tests/test_functional_bootstrap_opt.py b/src/tests/ofc25/tests/test_functional_bootstrap_opt.py similarity index 98% rename from src/tests/ecoc24/tests/test_functional_bootstrap_opt.py rename to src/tests/ofc25/tests/test_functional_bootstrap_opt.py index f3097519ab53437c467763490ae6c7d09dcf7857..32893e5dd4fa8f6ca6dc601eac3882e732f539a9 100644 --- a/src/tests/ecoc24/tests/test_functional_bootstrap_opt.py +++ b/src/tests/ofc25/tests/test_functional_bootstrap_opt.py @@ -24,7 +24,7 @@ from tests.Fixtures import context_client, device_client # pylint: disable=unuse LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'descriptor_opt.json') +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_opt.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_scenario_bootstrap( diff --git a/src/tests/ofc25/tests/test_functional_cleanup_e2e.py b/src/tests/ofc25/tests/test_functional_cleanup_e2e.py new file mode 100644 index 0000000000000000000000000000000000000000..cd294ae90e704ca6dfaf9a282f74c0c0f2651e6c --- /dev/null +++ b/src/tests/ofc25/tests/test_functional_cleanup_e2e.py @@ -0,0 +1,44 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_e2e.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_cleanup( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ecoc24/tests/test_functional_cleanup.py b/src/tests/ofc25/tests/test_functional_cleanup_ip.py similarity index 97% rename from src/tests/ecoc24/tests/test_functional_cleanup.py rename to src/tests/ofc25/tests/test_functional_cleanup_ip.py index a482c6e46559a97e3d194c6eb9dbc0a092cc39d4..87c5e2e386327a1bd8718192a9b3734368190858 100644 --- a/src/tests/ecoc24/tests/test_functional_cleanup.py +++ b/src/tests/ofc25/tests/test_functional_cleanup_ip.py @@ -24,7 +24,7 @@ from tests.Fixtures import context_client, device_client # pylint: disable=un LOGGER = logging.getLogger(__name__) LOGGER.setLevel(logging.DEBUG) -DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology.json') +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_ip.json') ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) def test_scenario_cleanup( diff --git a/src/tests/ofc25/tests/test_functional_cleanup_opt.py b/src/tests/ofc25/tests/test_functional_cleanup_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..03fa50fff002be382359a3c4a2b1aa89504d6462 --- /dev/null +++ b/src/tests/ofc25/tests/test_functional_cleanup_opt.py @@ -0,0 +1,44 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
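+
+# Cleanup test for the optical controller: same flow as the E2E variant, but
+# driven by topology_opt.json against the tfs-opt instance.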
+ +import logging, os +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId +from common.tools.descriptor.Loader import DescriptorLoader, validate_empty_scenario +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from tests.Fixtures import context_client, device_client # pylint: disable=unused-import + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'descriptors', 'topology_opt.json') +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +def test_scenario_cleanup( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name +) -> None: + # Verify the scenario has no services/slices + response = context_client.GetContext(ADMIN_CONTEXT_ID) + assert len(response.service_ids) == 0 + assert len(response.slice_ids) == 0 + + # Load descriptors and validate the base scenario + descriptor_loader = DescriptorLoader( + descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client) + descriptor_loader.validate() + descriptor_loader.unload() + validate_empty_scenario(context_client) diff --git a/src/tests/ecoc24/tests/test_functional_create_service.py b/src/tests/ofc25/tests/test_functional_create_service.py similarity index 100% rename from src/tests/ecoc24/tests/test_functional_create_service.py rename to src/tests/ofc25/tests/test_functional_create_service.py diff --git a/src/tests/ecoc24/tests/test_functional_delete_service.py b/src/tests/ofc25/tests/test_functional_delete_service.py similarity index 100% rename from src/tests/ecoc24/tests/test_functional_delete_service.py rename to src/tests/ofc25/tests/test_functional_delete_service.py diff --git a/src/tests/ecoc24/tfs-ingress-e2e.yaml b/src/tests/ofc25/tfs-ingress-e2e.yaml similarity index 54% rename from src/tests/ecoc24/tfs-ingress-e2e.yaml rename to src/tests/ofc25/tfs-ingress-e2e.yaml index c75a22cb3e7ed630c7616f6670d76b221e2b57d9..c2f4a5a6016a9c24d1ee007c6ae1b56e5847dcf6 100644 --- a/src/tests/ecoc24/tfs-ingress-e2e.yaml +++ b/src/tests/ofc25/tfs-ingress-e2e.yaml @@ -17,7 +17,28 @@ kind: Ingress metadata: name: tfs-ingress-e2e annotations: - nginx.ingress.kubernetes.io/rewrite-target: /$2 + nginx.ingress.kubernetes.io/rewrite-target: "/$2" + + # Enable websocket services and configure sticky cookies (seems not to work) + #nginx.org/websocket-services: "nbiservice" + #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io" + + # Enable sticky sessions (use same backend for all connections + # originated by a specific client, identified through its cookie) + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/affinity-mode: "persistent" + nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session" + nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io" + nginx.ingress.kubernetes.io/session-cookie-expires: "3600" + nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true" + + nginx.ingress.kubernetes.io/limit-rps: "50" # max requests per second per source IP + nginx.ingress.kubernetes.io/limit-connections: "50" # max concurrent connections per source IP + nginx.ingress.kubernetes.io/proxy-connect-timeout: "60" # max timeout for connecting to 
server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"      # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"      # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-e2e
   rules:
@@ -44,6 +65,13 @@ spec:
             name: nbiservice
             port:
               number: 8080
+          - path: /()(socket.io/.*)
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
           - path: /()(tfs-api/.*)
             pathType: Prefix
             backend:
diff --git a/src/tests/ecoc24/tfs-ingress-ip.yaml b/src/tests/ofc25/tfs-ingress-ip.yaml
similarity index 54%
rename from src/tests/ecoc24/tfs-ingress-ip.yaml
rename to src/tests/ofc25/tfs-ingress-ip.yaml
index aefcfcffede6fd6faa9540fac9ba3dc66d582681..43bc52b4a3cb4794aa7be37ddcf03d492f894084 100644
--- a/src/tests/ecoc24/tfs-ingress-ip.yaml
+++ b/src/tests/ofc25/tfs-ingress-ip.yaml
@@ -17,7 +17,28 @@ kind: Ingress
 metadata:
   name: tfs-ingress-ip
   annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+
+    # Enable websocket services and configure sticky cookies (seems not to work)
+    #nginx.org/websocket-services: "nbiservice"
+    #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
+
+    # Enable sticky sessions (use same backend for all connections
+    # originated by a specific client, identified through its cookie)
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
+    nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session"
+    nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
+    nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true"
+
+    nginx.ingress.kubernetes.io/limit-rps: "50"                 # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"         # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"     # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"      # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"      # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-ip
   rules:
@@ -44,6 +65,13 @@ spec:
             name: nbiservice
             port:
               number: 8080
+          - path: /()(socket.io/.*)
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
           - path: /()(tfs-api/.*)
             pathType: Prefix
             backend:
diff --git a/src/tests/ecoc24/tfs-ingress-opt.yaml b/src/tests/ofc25/tfs-ingress-opt.yaml
similarity index 54%
rename from src/tests/ecoc24/tfs-ingress-opt.yaml
rename to src/tests/ofc25/tfs-ingress-opt.yaml
index 57e1f9141608c659366f028eb99800bddd870f2c..87fc29de522552488f451e5ff0e3f4f54475d267 100644
--- a/src/tests/ecoc24/tfs-ingress-opt.yaml
+++ b/src/tests/ofc25/tfs-ingress-opt.yaml
@@ -17,7 +17,28 @@ kind: Ingress
 metadata:
   name: tfs-ingress-opt
   annotations:
-    nginx.ingress.kubernetes.io/rewrite-target: /$2
+    nginx.ingress.kubernetes.io/rewrite-target: "/$2"
+
+    # Enable websocket services and configure sticky cookies (seems not to work)
+    #nginx.org/websocket-services: "nbiservice"
+    #nginx.org/sticky-cookie-services: "serviceName=nbiservice tfs-nbi-session expires=1h path=/socket.io"
+
+    # Enable sticky sessions (use same backend for all connections
+    # originated by a specific client, identified through its cookie)
+    nginx.ingress.kubernetes.io/affinity: "cookie"
+    nginx.ingress.kubernetes.io/affinity-mode: "persistent"
+    nginx.ingress.kubernetes.io/session-cookie-name: "tfs-nbi-session"
+    nginx.ingress.kubernetes.io/session-cookie-path: "/socket.io"
+    nginx.ingress.kubernetes.io/session-cookie-expires: "3600"
+    nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "true"
+
+    nginx.ingress.kubernetes.io/limit-rps: "50"                 # max requests per second per source IP
+    nginx.ingress.kubernetes.io/limit-connections: "50"         # max concurrent connections per source IP
+    nginx.ingress.kubernetes.io/proxy-connect-timeout: "60"     # max timeout for connecting to server
+
+    # Enable long-lived connections, required for websocket/socket.io streams
+    nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"      # max timeout between two successive write operations
+    nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"      # max timeout between two successive read operations
 spec:
   ingressClassName: tfs-ingress-class-opt
   rules:
@@ -44,6 +65,13 @@ spec:
             name: nbiservice
             port:
               number: 8080
+          - path: /()(socket.io/.*)
+            pathType: Prefix
+            backend:
+              service:
+                name: nbiservice
+                port:
+                  number: 8080
           - path: /()(tfs-api/.*)
             pathType: Prefix
             backend:
diff --git a/src/tests/ofc25/undeploy.sh b/src/tests/ofc25/undeploy.sh
new file mode 100755
index 0000000000000000000000000000000000000000..e86fa14b226b19099d3ceb38f886e513236cfe22
--- /dev/null
+++ b/src/tests/ofc25/undeploy.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# ===== Check Microk8s is ready ==============================
+microk8s status --wait-ready
+kubectl get pods --all-namespaces
+
+# ===== Cleanup old deployments ==============================
+helm3 uninstall --namespace nats-e2e nats-e2e 2>/dev/null || true
+helm3 uninstall --namespace nats-ip nats-ip 2>/dev/null || true
+helm3 uninstall --namespace nats-opt nats-opt 2>/dev/null || true
+helm3 uninstall --namespace nats nats 2>/dev/null || true
+kubectl delete namespaces tfs tfs-ip tfs-opt tfs-e2e --ignore-not-found
+kubectl delete namespaces qdb qdb-e2e qdb-opt qdb-ip --ignore-not-found
+kubectl delete namespaces kafka kafka-ip kafka-opt kafka-e2e --ignore-not-found
+kubectl delete namespaces nats nats-ip nats-opt nats-e2e --ignore-not-found
+
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-opt.yaml --ignore-not-found
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-ip.yaml --ignore-not-found
+kubectl delete -f src/tests/ofc25/nginx-ingress-controller-e2e.yaml --ignore-not-found
+
+# ===== Check Microk8s is ready ==============================
+microk8s status --wait-ready
+kubectl get pods --all-namespaces
+
+echo "Done!"
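+
+# Note: the helm3/kubectl commands above use "2>/dev/null || true" and
+# --ignore-not-found, so re-running this script against a partially deployed
+# cluster is assumed to be safe.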
diff --git a/src/vnt_manager/client/VNTManagerClient.py b/src/vnt_manager/client/VNTManagerClient.py index b313a590f3c5d8db64a9ae1b7b9ac89a94595f2a..4ea5d3db1b40e12e214945c7d84caf302c00cbf8 100644 --- a/src/vnt_manager/client/VNTManagerClient.py +++ b/src/vnt_manager/client/VNTManagerClient.py @@ -12,22 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -import logging - -import grpc +import grpc, logging from common.Constants import ServiceNameEnum from common.proto.context_pb2 import Empty -from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest, VNTSubscriptionReply from common.proto.vnt_manager_pb2_grpc import VNTManagerServiceStub from common.Settings import get_service_host, get_service_port_grpc +from common.proto.context_pb2 import Link, LinkId, LinkIdList, LinkList from common.tools.client.RetryDecorator import delay_exponential, retry -from common.tools.grpc.Tools import grpc_message_to_json -from common.proto.context_pb2 import ( - Link, LinkId, LinkIdList, LinkList, -) from common.tools.grpc.Tools import grpc_message_to_json_string + LOGGER = logging.getLogger(__name__) MAX_RETRIES = 15 DELAY_FUNCTION = delay_exponential(initial=0.01, increment=2.0, maximum=5.0) @@ -40,10 +35,8 @@ RETRY_DECORATOR = retry( class VNTManagerClient: def __init__(self, host=None, port=None): - if not host: - host = get_service_host(ServiceNameEnum.VNTMANAGER) - if not port: - port = get_service_port_grpc(ServiceNameEnum.VNTMANAGER) + if not host: host = get_service_host(ServiceNameEnum.VNTMANAGER) + if not port: port = get_service_port_grpc(ServiceNameEnum.VNTMANAGER) self.endpoint = "{:s}:{:s}".format(str(host), str(port)) LOGGER.debug("Creating channel to {:s}...".format(str(self.endpoint))) self.channel = None @@ -61,13 +54,6 @@ class VNTManagerClient: self.channel = None self.stub = None - @RETRY_DECORATOR - def VNTSubscript(self, request: VNTSubscriptionRequest) -> VNTSubscriptionReply: - LOGGER.debug("Subscript request: {:s}".format(str(grpc_message_to_json(request)))) - response = self.stub.VNTSubscript(request) - LOGGER.debug("Subscript result: {:s}".format(str(grpc_message_to_json(response)))) - return response - @RETRY_DECORATOR def ListVirtualLinkIds(self, request: Empty) -> LinkIdList: LOGGER.debug('ListVirtualLinkIds request: {:s}'.format(grpc_message_to_json_string(request))) diff --git a/src/vnt_manager/requirements.in b/src/vnt_manager/requirements.in index 38764add745987ea115b9c8f2a9a169e6d0e3c39..d8f9537b471d5645604ff39ab5142c3ae0e7c9ba 100644 --- a/src/vnt_manager/requirements.in +++ b/src/vnt_manager/requirements.in @@ -12,4 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -websockets==12.0 +confluent-kafka==2.3.* +#websockets==12.0 diff --git a/src/vnt_manager/service/VNTManagerServiceServicerImpl.py b/src/vnt_manager/service/VNTManagerServiceServicerImpl.py index 46a012560bb32cbc7e862cf3cd688daac5160547..2424f5530c40708c5f5dd5aa367a8012cc98c4ef 100644 --- a/src/vnt_manager/service/VNTManagerServiceServicerImpl.py +++ b/src/vnt_manager/service/VNTManagerServiceServicerImpl.py @@ -12,170 +12,203 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import grpc -import json -import logging -import threading -import time -from websockets.sync.client import connect -from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from typing import Dict, Optional +import grpc, json, logging, uuid +from confluent_kafka import Consumer as KafkaConsumer +from confluent_kafka import Producer as KafkaProducer +from confluent_kafka import KafkaError from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method -from common.proto.context_pb2 import ContextId, Empty, Link, LinkId, LinkList, TopologyId -from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest, VNTSubscriptionReply +from common.proto.context_pb2 import Empty, Link, LinkId, LinkList, LinkTypeEnum from common.proto.vnt_manager_pb2_grpc import VNTManagerServiceServicer +#from common.tools.context_queries.EndPoint import get_endpoint_names from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string -from common.tools.object_factory.Context import json_context_id -from common.tools.object_factory.Topology import json_topology_id +from common.tools.kafka.Variables import KafkaConfig, KafkaTopic +#from common.tools.object_factory.Device import json_device_id +#from common.tools.object_factory.EndPoint import json_endpoint_id +from common.tools.object_factory.Link import json_link, json_link_id from context.client.ContextClient import ContextClient -from context.client.EventsCollector import EventsCollector -from .vntm_config_device import configure, deconfigure - -LOGGER = logging.getLogger(__name__) - -METRICS_POOL = MetricsPool("VNTManager", "RPC") - -context_client: ContextClient = ContextClient() - -JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) -ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID) -ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID)) - -GET_EVENT_TIMEOUT = 0.5 +#from .vntm_config_device import configure, deconfigure -class VNTMEventDispatcher(threading.Thread): - def __init__(self, host, port) -> None: - LOGGER.debug('Creating VNTM connector...') - self.host = host - self.port = port - super().__init__(name='VNTMEventDispatcher', daemon=True) - self._terminate = threading.Event() - LOGGER.debug('VNTM connector created') - - def start(self) -> None: - self._terminate.clear() - return super().start() - - def stop(self): - self._terminate.set() - - def send_msg(self, msg): - try: - self.websocket.send(msg) - except Exception as e: - LOGGER.exception('Unable to send message') - - def recv_msg(self): - message = self.websocket.recv() - return message - - def run(self) -> None: - events_collector = EventsCollector( - context_client, - log_events_received = True, - activate_context_collector = True, - activate_topology_collector = True, - activate_device_collector = True, - activate_link_collector = True, - activate_service_collector = False, - activate_slice_collector = False, - activate_connection_collector = False, - ) - events_collector.start() +LOGGER = logging.getLogger(__name__) - try: - url = "ws://" + str(self.host) + ":" + str(self.port) - LOGGER.info("Connecting to events server...: {:s}".format(url)) - self.websocket = connect(url) - except Exception as ex: - MSG = 'Error connecting to {:s}' - LOGGER.exception(MSG.format(str(url))) - else: - LOGGER.info('Connected to {:s}'.format(url)) - context_id = json_context_id(DEFAULT_CONTEXT_NAME) - topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id) - - try: - 
topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id)) - except Exception as ex: - LOGGER.warning('No topology found') - else: - self.send_msg(grpc_message_to_json_string(topology_details)) - - while not self._terminate.is_set(): - event = events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT) - if event is None: continue - LOGGER.debug('Event type: {}'.format(event)) - topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id)) - to_send = grpc_message_to_json_string(topology_details) - self.send_msg(to_send) - - LOGGER.info('Exiting') - events_collector.stop() +METRICS_POOL = MetricsPool('VNTManager', 'RPC') class VNTManagerServiceServicerImpl(VNTManagerServiceServicer): def __init__(self): - LOGGER.debug("Creating Servicer...") - LOGGER.debug("Servicer Created") + LOGGER.debug('Creating Servicer...') + self.context_client = ContextClient() + self.kafka_producer = KafkaProducer({ + 'bootstrap.servers' : KafkaConfig.get_kafka_address() + }) + self.kafka_consumer = KafkaConsumer({ + 'bootstrap.servers' : KafkaConfig.get_kafka_address(), + 'group.id' : str(uuid.uuid4()), + 'auto.offset.reset' : 'latest' + }) + self.kafka_consumer.subscribe([KafkaTopic.VNTMANAGER_RESPONSE.value]) self.links = [] - - @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def VNTSubscript(self, request: VNTSubscriptionRequest, context: grpc.ServicerContext) -> VNTSubscriptionReply: - LOGGER.info("Subscript request: {:s}".format(str(grpc_message_to_json(request)))) - reply = VNTSubscriptionReply() - reply.subscription = "OK" - - self.event_dispatcher = VNTMEventDispatcher(request.host, int(request.port)) - self.host = request.host - self.port = request.port - LOGGER.info('sleeping 5...') - time.sleep(5) - self.event_dispatcher.start() - return reply + LOGGER.debug('Servicer Created') @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def ListVirtualLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList: - return [link for link in context_client.ListLinks(Empty()).links if link.virtual] + links = self.context_client.ListLinks(Empty()).links + return [link for link in links if link.virtual] @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def GetVirtualLink(self, request : LinkId, context : grpc.ServicerContext) -> Link: - link = context_client.GetLink(request) + link = self.context_client.GetLink(request) return link if link.virtual else Empty() + def send_recommendation(self, vntm_request : Dict) -> str: + request_key = str(uuid.uuid4()) + vntm_request = json.dumps(vntm_request) + MSG = '[send_recommendation] request_key={:s} vntm_request={:s}' + LOGGER.info(MSG.format(str(request_key), str(vntm_request))) + self.kafka_producer.produce( + KafkaTopic.VNTMANAGER_REQUEST.value, + key=request_key.encode('utf-8'), + value=vntm_request.encode('utf-8'), + ) + self.kafka_producer.flush() + return request_key + + def send_vlink_create(self, request : Link) -> str: + return self.send_recommendation({ + 'event': 'vlink_create', 'data': grpc_message_to_json_string(request) + }) + + def send_vlink_remove(self, request : LinkId) -> str: + return self.send_recommendation({ + 'event': 'vlink_remove', 'data': grpc_message_to_json_string(request) + }) + + def wait_for_reply(self, request_key : str) -> Optional[Dict]: + LOGGER.info('[wait_for_reply] request_key={:s}'.format(str(request_key))) + + while True: + receive_msg = self.kafka_consumer.poll(2.0) + if receive_msg is None: continue + LOGGER.info('[wait_for_reply] 
receive_msg={:s}'.format(str(receive_msg))) + if receive_msg.error(): + if receive_msg.error().code() == KafkaError._PARTITION_EOF: continue + LOGGER.error('[wait_for_reply] Consumer error: {:s}'.format(str(receive_msg.error()))) + return None + + reply_key = receive_msg.key().decode('utf-8') + LOGGER.info('[wait_for_reply] reply_key={:s}'.format(str(reply_key))) + if reply_key == request_key: + LOGGER.info('[wait_for_reply] match!') + break + LOGGER.info('[wait_for_reply] no match... waiting...') + + json_receive_msg = json.loads(receive_msg.value().decode('utf-8')) + LOGGER.info('[wait_for_reply] json_receive_msg={:s}'.format(str(json_receive_msg))) + + if 'data' not in json_receive_msg: + MSG = 'Malformed reply: {:s}' + raise Exception(MSG.format(str(json_receive_msg))) + data = json_receive_msg['data'] + + if 'error' in data: + MSG = 'Something went wrong: {:s}' + raise Exception(MSG.format(str(data['error']))) + + if 'result' not in data: + MSG = 'Malformed reply: {:s}' + raise Exception(MSG.format(str(data))) + return data['result'] + @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def SetVirtualLink(self, request : Link, context : grpc.ServicerContext) -> LinkId: try: - LOGGER.info('SETTING virtual link') - self.event_dispatcher.send_msg(grpc_message_to_json_string(request)) - # configure('CSGW1', 'xe5', 'CSGW2', 'xe5', 'ecoc2024-1') - response = self.event_dispatcher.recv_msg() - message_json = json.loads(response) - link = Link(**message_json) - context_client.SetLink(link) - except Exception as e: - MSG = 'Exception setting virtual link={:s}') + LOGGER.info('[SetVirtualLink] request={:s}'.format(grpc_message_to_json_string(request))) + request_key = self.send_vlink_create(request) + reply = self.wait_for_reply(request_key) + LOGGER.info('[SetVirtualLink] reply={:s}'.format(str(reply))) + + # At this point, we know the request is processed and an optical connection was created + + vlink_uuid = reply['vlink_uuid'] + LOGGER.info('[SetVirtualLink] vlink_uuid={:s}'.format(str(vlink_uuid))) + + vlink_name = request.name + if len(vlink_name) == 0: vlink_name = request.link_id.link_uuid.uuid + LOGGER.info('[SetVirtualLink] vlink_name={:s}'.format(str(vlink_name))) + + vlink_endpoint_ids = [ + grpc_message_to_json(endpoint_id) + for endpoint_id in request.link_endpoint_ids + ] + LOGGER.info('[SetVirtualLink] vlink_endpoint_ids={:s}'.format(str(vlink_endpoint_ids))) + + total_capacity_gbps = request.attributes.total_capacity_gbps + LOGGER.info('[SetVirtualLink] total_capacity_gbps={:s}'.format(str(total_capacity_gbps))) + + vlink = Link(**json_link( + vlink_uuid, vlink_endpoint_ids, name=vlink_name, + link_type=LinkTypeEnum.LINKTYPE_VIRTUAL, + total_capacity_gbps=total_capacity_gbps, + )) + LOGGER.info('[SetVirtualLink] vlink={:s}'.format(grpc_message_to_json_string(vlink))) + + #device_names, endpoints_data = get_endpoint_names(self.context_client, request.link_endpoint_ids) + + #device_uuid_or_name_a = vlink_endpoint_ids[ 0]['device_id']['device_uuid']['uuid'] + #device_name_a = device_names.get(device_uuid_or_name_a, device_uuid_or_name_a) + + #device_uuid_or_name_b = vlink_endpoint_ids[-1]['device_id']['device_uuid']['uuid'] + #device_name_b = device_names.get(device_uuid_or_name_b, device_uuid_or_name_b) + + #endpoint_uuid_or_name_a = vlink_endpoint_ids[ 0]['endpoint_uuid']['uuid'] + #endpoint_name_a = endpoints_data.get(endpoint_uuid_or_name_a, (endpoint_uuid_or_name_a, None)) + #endpoint_name_a = endpoint_name_a.replace('PORT-', '') + + #endpoint_uuid_or_name_b = 
vlink_endpoint_ids[-1]['endpoint_uuid']['uuid'] + #endpoint_name_b = endpoints_data.get(endpoint_uuid_or_name_b, (endpoint_uuid_or_name_b, None)) + #endpoint_name_b = endpoint_name_b.replace('PORT-', '') + + #network_instance_name = '-'.join([ + # device_name_a, endpoint_name_a, device_name_b, endpoint_name_b + #]) + #configure( + # device_name_a, endpoint_name_a, device_name_b, endpoint_name_b, network_instance_name + #) + + vlink_id = self.context_client.SetLink(vlink) + + MSG = 'Virtual link created, vlink_id={:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(vlink_id))) + return vlink_id + except: # pylint: disable=bare-except + MSG = 'Exception setting virtual link={:s}' LOGGER.exception(MSG.format(str(request.link_id.link_uuid.uuid))) - return request.link_id + raise @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RemoveVirtualLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty: try: - LOGGER.debug('Removing virtual link') - self.event_dispatcher.send_msg(grpc_message_to_json_string(request)) + LOGGER.info('[RemoveVirtualLink] request={:s}'.format(grpc_message_to_json_string(request))) + request_key = self.send_vlink_remove(request) + reply = self.wait_for_reply(request_key) + LOGGER.info('[RemoveVirtualLink] reply={:s}'.format(str(reply))) + + # At this point, we know the request is processed and an optical connection was removed + + vlink_uuid = request.link_uuid.uuid + LOGGER.info('[RemoveVirtualLink] vlink_uuid={:s}'.format(str(vlink_uuid))) + + vlink_id = LinkId(**json_link_id(vlink_uuid)) + LOGGER.info('[RemoveVirtualLink] vlink_id={:s}'.format(grpc_message_to_json_string(vlink_id))) + # deconfigure('CSGW1', 'xe5', 'CSGW2', 'xe5', 'ecoc2024-1') - response = self.event_dispatcher.recv_msg() - message_json = json.loads(response) - link_id = LinkId(**message_json) - context_client.RemoveLink(link_id) + self.context_client.RemoveLink(vlink_id) - LOGGER.info('Removed') - except Exception as e: + MSG = 'Virtual link removed, vlink_id={:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(vlink_id))) + return Empty() + except: # pylint: disable=bare-except MSG = 'Exception removing virtual link={:s}' LOGGER.exception(MSG.format(str(request.link_uuid.uuid))) - return msg_error - else: - context_client.RemoveLink(request) - LOGGER.info('Removed') - - return Empty() + raise diff --git a/src/vnt_manager/service/__main__.py b/src/vnt_manager/service/__main__.py index c36a0ae1fb7bfa568f79bae26e53cd5d734a4f2e..089a330e11497f024937673c927587fa9a5966ce 100644 --- a/src/vnt_manager/service/__main__.py +++ b/src/vnt_manager/service/__main__.py @@ -14,6 +14,7 @@ import logging, signal, sys, threading from prometheus_client import start_http_server +from common.tools.kafka.Variables import KafkaTopic from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, @@ -49,6 +50,8 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) + KafkaTopic.create_all_topics() + # Starting VNTManager service grpc_service = VNTManagerService() grpc_service.start() diff --git a/src/vnt_manager/service/old_code.py b/src/vnt_manager/service/old_code.py new file mode 100644 index 0000000000000000000000000000000000000000..a701a1c772d3c18f4f6511a6a4520f8ec662ee68 --- /dev/null +++ b/src/vnt_manager/service/old_code.py @@ -0,0 +1,168 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you 
may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import grpc +import json +import logging +import threading +import time +from websockets.sync.client import connect +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME +from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.proto.context_pb2 import ContextId, Empty, Link, LinkId, LinkList, TopologyId +from common.proto.vnt_manager_pb2 import VNTSubscriptionRequest, VNTSubscriptionReply +from common.proto.vnt_manager_pb2_grpc import VNTManagerServiceServicer +from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Topology import json_topology_id +from context.client.ContextClient import ContextClient +from context.client.EventsCollector import EventsCollector +from .vntm_config_device import configure, deconfigure + +LOGGER = logging.getLogger(__name__) + +METRICS_POOL = MetricsPool("VNTManager", "RPC") + +context_client: ContextClient = ContextClient() + +JSON_ADMIN_CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_NAME) +ADMIN_CONTEXT_ID = ContextId(**JSON_ADMIN_CONTEXT_ID) +ADMIN_TOPOLOGY_ID = TopologyId(**json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id=JSON_ADMIN_CONTEXT_ID)) + +GET_EVENT_TIMEOUT = 0.5 + + +class VNTMEventDispatcher(threading.Thread): + def __init__(self, host, port) -> None: + LOGGER.debug('Creating VNTM connector...') + self.host = host + self.port = port + super().__init__(name='VNTMEventDispatcher', daemon=True) + self._terminate = threading.Event() + LOGGER.debug('VNTM connector created') + + def start(self) -> None: + self._terminate.clear() + return super().start() + + def stop(self): + self._terminate.set() + + def send_msg(self, msg): + try: + self.websocket.send(msg) + except Exception as e: + LOGGER.exception('Unable to send message') + + def recv_msg(self): + message = self.websocket.recv() + return message + + def run(self) -> None: + events_collector = EventsCollector( + context_client, + log_events_received = True, + activate_context_collector = True, + activate_topology_collector = True, + activate_device_collector = True, + activate_link_collector = True, + activate_service_collector = False, + activate_slice_collector = False, + activate_connection_collector = False, + ) + events_collector.start() + + try: + url = "ws://" + str(self.host) + ":" + str(self.port) + LOGGER.info("Connecting to events server...: {:s}".format(url)) + self.websocket = connect(url) + except Exception as ex: + MSG = 'Error connecting to {:s}' + LOGGER.exception(MSG.format(str(url))) + else: + LOGGER.info('Connected to {:s}'.format(url)) + context_id = json_context_id(DEFAULT_CONTEXT_NAME) + topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id) + + try: + topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id)) + except Exception as ex: + LOGGER.warning('No topology found') + else: + self.send_msg(grpc_message_to_json_string(topology_details)) + + while not self._terminate.is_set(): 
+                event = events_collector.get_event(block=True, timeout=GET_EVENT_TIMEOUT)
+                if event is None: continue
+                LOGGER.debug('Event type: {}'.format(event))
+                topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id))
+                to_send = grpc_message_to_json_string(topology_details)
+                self.send_msg(to_send)
+
+        LOGGER.info('Exiting')
+        events_collector.stop()
+
+
+class VNTManagerServiceServicerImpl(VNTManagerServiceServicer):
+    def __init__(self):
+        LOGGER.debug("Creating Servicer...")
+        LOGGER.debug("Servicer Created")
+        self.links = []
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def ListVirtualLinks(self, request : Empty, context : grpc.ServicerContext) -> LinkList:
+        links = context_client.ListLinks(Empty()).links
+        return [link for link in links if link.virtual]
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def GetVirtualLink(self, request : LinkId, context : grpc.ServicerContext) -> Link:
+        link = context_client.GetLink(request)
+        return link if link.virtual else Empty()
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def SetVirtualLink(self, request : Link, context : grpc.ServicerContext) -> LinkId:
+        try:
+            LOGGER.info('SETTING virtual link')
+            self.event_dispatcher.send_msg(grpc_message_to_json_string(request))
+            # configure('CSGW1', 'xe5', 'CSGW2', 'xe5', 'ecoc2024-1')
+            response = self.event_dispatcher.recv_msg()
+            message_json = json.loads(response)
+            link = Link(**message_json)
+            context_client.SetLink(link)
+        except Exception as e:
+            MSG = 'Exception setting virtual link={:s}'
+            LOGGER.exception(MSG.format(str(request.link_id.link_uuid.uuid)))
+        return request.link_id
+
+    @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
+    def RemoveVirtualLink(self, request : LinkId, context : grpc.ServicerContext) -> Empty:
+        try:
+            LOGGER.debug('Removing virtual link')
+            self.event_dispatcher.send_msg(grpc_message_to_json_string(request))
+            # deconfigure('CSGW1', 'xe5', 'CSGW2', 'xe5', 'ecoc2024-1')
+            response = self.event_dispatcher.recv_msg()
+            message_json = json.loads(response)
+            link_id = LinkId(**message_json)
+            context_client.RemoveLink(link_id)
+
+            LOGGER.info('Removed')
+        except Exception as e:
+            MSG = 'Exception removing virtual link={:s}'
+            LOGGER.exception(MSG.format(str(request.link_uuid.uuid)))
+            return Empty()
+        else:
+            context_client.RemoveLink(request)
+            LOGGER.info('Removed')
+
+        return Empty()
diff --git a/src/webui/Dockerfile b/src/webui/Dockerfile
index 0327b68ef712fbd4d89776ed348cbc00171153c9..d6e21dd1730725fad565c29a130d976248cda8cf 100644
--- a/src/webui/Dockerfile
+++ b/src/webui/Dockerfile
@@ -88,6 +88,8 @@ COPY --chown=webui:webui src/qkd_app/__init__.py qkd_app/__init__.py
 COPY --chown=webui:webui src/qkd_app/client/. qkd_app/client/
 COPY --chown=webui:webui src/bgpls_speaker/__init__.py bgpls_speaker/__init__.py
 COPY --chown=webui:webui src/bgpls_speaker/client/. bgpls_speaker/client/
+COPY --chown=webui:webui src/vnt_manager/__init__.py vnt_manager/__init__.py
+COPY --chown=webui:webui src/vnt_manager/client/. vnt_manager/client/
 COPY --chown=webui:webui src/webui/. 
webui/ # Start the service diff --git a/src/webui/service/device/routes.py b/src/webui/service/device/routes.py index 16b86c769c5af1b69db4b669f5689043d03536bd..977f0ec4bd2be791a2379492773142b3d0dd8599 100644 --- a/src/webui/service/device/routes.py +++ b/src/webui/service/device/routes.py @@ -184,20 +184,21 @@ def logical(device_uuid: str): @device.get('<path:device_uuid>/delete') def delete(device_uuid): try: + context_client.connect() + + device_obj = get_device( + context_client, device_uuid, rw_copy=False, include_components=False, + include_config_rules=False, include_endpoints=False + ) + if device_obj is None: + flash('Device({:s}) not found'.format(str(device_uuid)), 'danger') + else: + device_client.connect() + device_client.DeleteDevice(device_obj.device_id) + device_client.close() + flash(f'Device "{device_uuid}" deleted successfully!', 'success') - # first, check if device exists! - # request: DeviceId = DeviceId() - # request.device_uuid.uuid = device_uuid - # response: Device = client.GetDevice(request) - # TODO: finalize implementation - - request = DeviceId() - request.device_uuid.uuid = device_uuid # pylint: disable=no-member - device_client.connect() - device_client.DeleteDevice(request) - device_client.close() - - flash(f'Device "{device_uuid}" deleted successfully!', 'success') + context_client.close() except Exception as e: # pylint: disable=broad-except flash(f'Problem deleting device "{device_uuid}": {e.details()}', 'danger') current_app.logger.exception(e) diff --git a/src/webui/service/link/routes.py b/src/webui/service/link/routes.py index 42f5984a3c0957a0740690ad6e37bed7438449b7..34b2af27ff3015bef72262e4a517c9c96dbc6093 100644 --- a/src/webui/service/link/routes.py +++ b/src/webui/service/link/routes.py @@ -21,10 +21,12 @@ from common.tools.context_queries.EndPoint import get_endpoint_names from common.tools.context_queries.Link import get_link from common.tools.context_queries.Topology import get_topology from context.client.ContextClient import ContextClient +from vnt_manager.client.VNTManagerClient import VNTManagerClient link = Blueprint('link', __name__, url_prefix='/link') context_client = ContextClient() +vntm_client = VNTManagerClient() @link.get('/') def home(): @@ -77,20 +79,20 @@ def detail(link_uuid: str): @link.get('<path:link_uuid>/delete') def delete(link_uuid): try: + context_client.connect() - # first, check if link exists! - # request: LinkId = LinkId() - # request.link_uuid.uuid = link_uuid - # response: Link = client.GetLink(request) - # TODO: finalize implementation + link_obj = get_link(context_client, link_uuid, rw_copy=False) + if link_obj is None: + flash('Link({:s}) not found'.format(str(link_uuid)), 'danger') + else: + link_type = link_obj.link_type + if link_type == LinkTypeEnum.LINKTYPE_VIRTUAL: + vntm_client.RemoveVirtualLink(link_obj.link_id) + else: + context_client.RemoveLink(link_obj.link_id) + flash(f'Link "{link_uuid}" deleted successfully!', 'success') - request = LinkId() - request.link_uuid.uuid = link_uuid # pylint: disable=no-member - context_client.connect() - context_client.RemoveLink(request) context_client.close() - - flash(f'Link "{link_uuid}" deleted successfully!', 'success') except Exception as e: # pylint: disable=broad-except flash(f'Problem deleting link "{link_uuid}": {e.details()}', 'danger') current_app.logger.exception(e)
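
Note on the Kafka exchange introduced above: VNTManagerServiceServicerImpl implements a plain request/reply pattern. send_recommendation() produces a JSON message keyed with a fresh UUID on KafkaTopic.VNTMANAGER_REQUEST, and wait_for_reply() blocks on KafkaTopic.VNTMANAGER_RESPONSE until a message carrying the same key arrives, expecting an envelope of the form {'data': {'result': ...}} on success or {'data': {'error': ...}} on failure (for vlink_create, the result must include a 'vlink_uuid'). The sketch below shows what a peer answering these requests could look like; the group id, the controller hooks, and the generated vlink_uuid are illustrative assumptions, while the topic names and the envelope/key-echo contract follow from wait_for_reply() above.

    import json, uuid
    from confluent_kafka import Consumer as KafkaConsumer
    from confluent_kafka import Producer as KafkaProducer
    from common.tools.kafka.Variables import KafkaConfig, KafkaTopic

    kafka_consumer = KafkaConsumer({
        'bootstrap.servers' : KafkaConfig.get_kafka_address(),
        'group.id'          : 'vntm-responder',   # assumption: any stable group id works here
        'auto.offset.reset' : 'latest',
    })
    kafka_consumer.subscribe([KafkaTopic.VNTMANAGER_REQUEST.value])
    kafka_producer = KafkaProducer({'bootstrap.servers' : KafkaConfig.get_kafka_address()})

    while True:
        receive_msg = kafka_consumer.poll(1.0)
        if receive_msg is None or receive_msg.error(): continue
        vntm_request = json.loads(receive_msg.value().decode('utf-8'))
        try:
            if vntm_request['event'] == 'vlink_create':
                # hypothetical hook: set up the optical connection, then report its UUID
                reply = {'data': {'result': {'vlink_uuid': str(uuid.uuid4())}}}
            elif vntm_request['event'] == 'vlink_remove':
                # hypothetical hook: tear down the optical connection
                reply = {'data': {'result': {}}}
            else:
                reply = {'data': {'error': 'unsupported event: ' + str(vntm_request['event'])}}
        except Exception as e:   # pylint: disable=broad-except
            reply = {'data': {'error': str(e)}}
        # Echo the request key so wait_for_reply() can correlate this answer.
        kafka_producer.produce(
            KafkaTopic.VNTMANAGER_RESPONSE.value,
            key=receive_msg.key(),
            value=json.dumps(reply).encode('utf-8'),
        )
        kafka_producer.flush()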