diff --git a/deploy/new_monitoring.sh b/deploy/monitoring.sh similarity index 90% rename from deploy/new_monitoring.sh rename to deploy/monitoring.sh index ac1f46723af29758b05307d1717d958249938d71..6fa633a378d133d258b96a4b41dbd7de833690d9 100755 --- a/deploy/new_monitoring.sh +++ b/deploy/monitoring.sh @@ -33,11 +33,11 @@ VALUES_FILE_PROM="$VALUES_FILE_PATH/prometheus_values.yaml" # ----------------------------------------------------------- # Mimir Configuration # ----------------------------------------------------------- -RELEASE_NAME_MIMIR="mon-mimir" -CHART_REPO_NAME_MIMIR="grafana" -CHART_REPO_URL_MIMIR="https://grafana.github.io/helm-charts" -CHART_NAME_MIMIR="mimir-distributed" -VALUES_FILE_MIMIR="$VALUES_FILE_PATH/mimir_values.yaml" +# RELEASE_NAME_MIMIR="mon-mimir" +# CHART_REPO_NAME_MIMIR="grafana" +# CHART_REPO_URL_MIMIR="https://grafana.github.io/helm-charts" +# CHART_NAME_MIMIR="mimir-distributed" +# VALUES_FILE_MIMIR="$VALUES_FILE_PATH/mimir_values.yaml" # ----------------------------------------------------------- # Grafana Configuration @@ -105,12 +105,12 @@ kubectl rollout status deployment/"$RELEASE_NAME_PROM-server" -n "$NAMESPACE" || # 2) Deploy Mimir -deploy_chart "$RELEASE_NAME_MIMIR" \ - "$CHART_REPO_NAME_MIMIR" \ - "$CHART_REPO_URL_MIMIR" \ - "$CHART_NAME_MIMIR" \ - "$VALUES_FILE_MIMIR" \ - "$NAMESPACE" +# deploy_chart "$RELEASE_NAME_MIMIR" \ +# "$CHART_REPO_NAME_MIMIR" \ +# "$CHART_REPO_URL_MIMIR" \ +# "$CHART_NAME_MIMIR" \ +# "$VALUES_FILE_MIMIR" \ +# "$NAMESPACE" # Depending on how Mimir runs (StatefulSets, Deployments), you can wait for # the correct resource to be ready. 
For example: diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 568c3a51dfe0b6908c0055ef2d44c2d183e166a8..a1cf1e99d6b142cdc556c4493cc0d7bdcc933b32 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -55,9 +55,15 @@ spec: readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:1010"] + initialDelaySeconds: 50 # Context's gunicorn takes 30~40 seconds to bootstrap + periodSeconds: 10 + failureThreshold: 10 livenessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:1010"] + initialDelaySeconds: 50 # Context's gunicorn takes 30~40 seconds to bootstrap + periodSeconds: 10 + failureThreshold: 10 resources: requests: cpu: 250m diff --git a/manifests/nbiservice.yaml b/manifests/nbiservice.yaml index 55accdc44dd6ec34ac379d2e09bb0df77406e3fa..2f73fc5d5fb0a6f89755188d296e532f354feb1c 100644 --- a/manifests/nbiservice.yaml +++ b/manifests/nbiservice.yaml @@ -49,16 +49,22 @@ spec: readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:9090"] + initialDelaySeconds: 50 # NBI's gunicorn takes 30~40 seconds to bootstrap + periodSeconds: 10 + failureThreshold: 10 livenessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:9090"] + initialDelaySeconds: 50 # NBI's gunicorn takes 30~40 seconds to bootstrap + periodSeconds: 10 + failureThreshold: 10 resources: requests: - cpu: 50m + cpu: 128m memory: 64Mi limits: cpu: 500m - memory: 512Mi + memory: 1Gi --- apiVersion: v1 kind: Service diff --git a/src/telemetry/backend/Config.py b/src/telemetry/backend/Config.py new file mode 100644 index 0000000000000000000000000000000000000000..73c37610dcc7ba6e76760b567f699a8be575b3e3 --- /dev/null +++ b/src/telemetry/backend/Config.py @@ -0,0 +1,20 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os + +TRUE_VALUES = {'T', 'TRUE', 'YES', '1'} +DEVICE_EMULATED_ONLY = os.environ.get('DEVICE_EMULATED_ONLY') +LOAD_ALL_DEVICE_DRIVERS = (DEVICE_EMULATED_ONLY is None) or (DEVICE_EMULATED_ONLY.upper() not in TRUE_VALUES) diff --git a/src/telemetry/backend/Tools.py b/src/telemetry/backend/Tools.py new file mode 100644 index 0000000000000000000000000000000000000000..1b047834cbe7a9586dc1c6f1c4b181db4a06b9c0 --- /dev/null +++ b/src/telemetry/backend/Tools.py @@ -0,0 +1,28 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Any, Dict +from common.proto.context_pb2 import DeviceConfig, ConfigActionEnum + + +def get_connect_rules(device_config : DeviceConfig) -> Dict[str, Any]: + connect_rules = dict() + for config_rule in device_config.config_rules: + if config_rule.action != ConfigActionEnum.CONFIGACTION_SET: continue + if config_rule.WhichOneof('config_rule') != 'custom': continue + if not config_rule.custom.resource_key.startswith('_connect/'): continue + connect_attribute = config_rule.custom.resource_key.replace('_connect/', '') + connect_rules[connect_attribute] = config_rule.custom.resource_value + return connect_rules + diff --git a/src/telemetry/backend/requirements.in b/src/telemetry/backend/requirements.in index 603c6e4b58629fbeb241f316d5e3b564e38a0462..36b42426d3bf06e47f5576b15bdecde2d83c0b91 100644 --- a/src/telemetry/backend/requirements.in +++ b/src/telemetry/backend/requirements.in @@ -17,4 +17,10 @@ confluent-kafka==2.3.* libyang==2.8.4 numpy==2.0.1 APScheduler==3.10.1 +deepdiff==6.7.* pygnmi==0.8.14 +ipaddress +macaddress +p4runtime==1.3.0 +googleapis-common-protos==1.69.1 +tabulate diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index 3aeee8238d3fa47d511f1b520d44bf0712fe10e7..f3a18e4f4fd078c10196eb80e6a17256ac2ad03a 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -21,6 +21,7 @@ from datetime import datetime, timezone from confluent_kafka import Producer as KafkaProducer from confluent_kafka import Consumer as KafkaConsumer from confluent_kafka import KafkaError + from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc from common.method_wrappers.Decorator import MetricsPool @@ -29,9 +30,11 @@ from common.tools.service.GenericGrpcService import GenericGrpcService from common.tools.context_queries.Device import get_device from 
common.proto.kpi_manager_pb2 import KpiId +from .collector_api._Collector import _Collector +from .collector_api.DriverInstanceCache import DriverInstanceCache, get_driver from kpi_manager.client.KpiManagerClient import KpiManagerClient from context.client.ContextClient import ContextClient -from telemetry.backend.collectors.emulated.EmulatedCollector import EmulatedCollector +from telemetry.backend.service.collectors.emulated.EmulatedCollector import EmulatedCollector LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('TelemetryBackend', 'backendService') @@ -41,7 +44,7 @@ class TelemetryBackendService(GenericGrpcService): Class listens for request on Kafka topic, fetches requested metrics from device. Produces metrics on both TELEMETRY_RESPONSE and VALUE kafka topics. """ - def __init__(self, cls_name : str = __name__) -> None: + def __init__(self, driver_instance_cache : DriverInstanceCache, cls_name : str = __name__) -> None: LOGGER.info('Init TelemetryBackendService') port = get_service_port_grpc(ServiceNameEnum.TELEMETRYBACKEND) super().__init__(port, cls_name=cls_name) @@ -49,9 +52,10 @@ class TelemetryBackendService(GenericGrpcService): self.kafka_consumer = KafkaConsumer({'bootstrap.servers' : KafkaConfig.get_kafka_address(), 'group.id' : 'backend', 'auto.offset.reset' : 'latest'}) - self.collector = None - self.context_client = ContextClient() - self.kpi_manager_client = KpiManagerClient() + self.driver_instance_cache = driver_instance_cache + self.collector = None + self.context_client = ContextClient() + self.kpi_manager_client = KpiManagerClient() self.active_jobs = {} def install_servicers(self): @@ -119,26 +123,31 @@ class TelemetryBackendService(GenericGrpcService): except Exception as e: LOGGER.warning("Unable to consumer message from topic: {:}. 
ERROR: {:}".format(KafkaTopic.TELEMETRY_REQUEST.value, e)) - def CollectorHandler(self, collector_id, kpi_id, duration, interval, stop_event): + def GenericCollectorHandler(self, collector_id, kpi_id, duration, interval, stop_event ): """ Method to handle collector request. """ - device_type, end_points = self.get_endpoint_detail(kpi_id) - - if end_points is None: - LOGGER.warning("KPI ID: {:} - Endpoints not found. Skipping...".format(kpi_id)) - return - + device_type, collector, resource_key = self.get_subscription_detail_by_kpi_id(kpi_id) if device_type and "emu" in device_type: - LOGGER.info("KPI ID: {:} - Device Type: {:} - Endpoints: {:}".format(kpi_id, device_type, end_points)) - subscription = [collector_id, end_points, duration, interval] + LOGGER.info("KPI ID: {:} - Device Type: {:} - Resource Key: {:}".format(kpi_id, device_type, resource_key)) + endpoints = self.get_endpoint_detail_by_kpi_id(kpi_id) + subscription = [collector_id, endpoints , duration, interval] self.EmulatedCollectorHandler(subscription, duration, collector_id, kpi_id, stop_event) else: - LOGGER.warning("KPI ID: {:} - Device Type: {:} - Not Supported".format(kpi_id, device_type)) + if collector: + self.collector = collector + self.collector.Connect() + resources_to_subscribe = [(resource_key, duration, interval)] + self.collector.SubscribeState(resources_to_subscribe) + while not stop_event.is_set(): + samples = list(self.collector.GetState(duration=duration, blocking=True)) + LOGGER.info("KPI: {:} - Value: {:}".format(kpi_id, samples)) + self.GenerateKpiValue(collector_id, kpi_id, samples) + time.sleep(1) + self.collector.Disconnect() def EmulatedCollectorHandler(self, subscription, duration, collector_id, kpi_id, stop_event): # EmulatedCollector - self.collector = EmulatedCollector(address="127.0.0.1", port=8000) self.collector.Connect() if not self.collector.SubscribeState(subscription): @@ -150,7 +159,6 @@ class TelemetryBackendService(GenericGrpcService): 
self.GenerateKpiValue(collector_id, kpi_id, samples) time.sleep(1) self.collector.Disconnect() - # self.TerminateCollector(collector_id) # No need to terminate, automatically terminated after duration. def GenerateKpiValue(self, collector_id: str, kpi_id: str, measured_kpi_value: Any): """ @@ -191,7 +199,36 @@ class TelemetryBackendService(GenericGrpcService): except: LOGGER.exception("Error terminating job: {:}".format(job_id)) - def get_endpoint_detail(self, kpi_id: str): + def get_subscription_detail_by_kpi_id(self, kpi_id: str): + """ + Method to get device_type and endpoint detail based on device_uuid. + """ + kpi_id_obj = KpiId() + kpi_id_obj.kpi_id.uuid = kpi_id + kpi_descriptor = self.kpi_manager_client.GetKpiDescriptor(kpi_id_obj) + if not kpi_descriptor: + LOGGER.warning(f"KPI ID: {kpi_id} - Descriptor not found. Skipping...") + return (None, None, None) + + device_uuid = kpi_descriptor.device_id.device_uuid.uuid + endpoint_uuid = kpi_descriptor.endpoint_id.endpoint_uuid.uuid + kpi_sample_type = kpi_descriptor.kpi_sample_type + + device = get_device( context_client = self.context_client, + device_uuid = device_uuid, + include_config_rules = False, + include_components = False, + ) + # Getting device collector (testing) + collector : _Collector = get_driver(self.driver_instance_cache, device) + # if device: + # monitoring_loops = MonitoringLoops() + # resource_key = monitoring_loops.get_resource_key(device_uuid, endpoint_uuid, kpi_sample_type) + # return (device.device_type, collector, resource_key) + LOGGER.warning(f"Device not found: {device_uuid}") + return (None, None, None) + + def get_endpoint_detail_by_kpi_id(self, kpi_id: str): """ Method to get device_type and endpoint detail based on device_uuid. """ @@ -200,7 +237,7 @@ class TelemetryBackendService(GenericGrpcService): kpi_descriptor = self.kpi_manager_client.GetKpiDescriptor(kpi_id_obj) if not kpi_descriptor: LOGGER.warning(f"KPI ID: {kpi_id} - Descriptor not found. 
Skipping...") - return (None, None) + return None device_id = kpi_descriptor.device_id.device_uuid.uuid endpoint_id = kpi_descriptor.endpoint_id.endpoint_uuid.uuid @@ -220,11 +257,9 @@ class TelemetryBackendService(GenericGrpcService): for sample_type in endpoint.kpi_sample_types: kpi_sample_types.append(sample_type) endpoint_dict["sample_types"] = kpi_sample_types - - return (device.device_type, endpoint_dict) - + return endpoint_dict LOGGER.warning(f"Device ID: {device_id} - Endpoint ID: {endpoint_id} - Not Found") - return (None, None) + return None def delivery_callback(self, err, msg): if err: diff --git a/src/telemetry/backend/service/__main__.py b/src/telemetry/backend/service/__main__.py index 6e77d5d6cc7e31792d737bca04fb1af0e7baa2bb..efcd3e1d4e4a9a97f85a9612dd33a6582cf1000e 100644 --- a/src/telemetry/backend/service/__main__.py +++ b/src/telemetry/backend/service/__main__.py @@ -13,13 +13,18 @@ # limitations under the License. import logging, signal, sys, threading -from prometheus_client import start_http_server +from prometheus_client.exposition import start_http_server from common.Settings import get_log_level, get_metrics_port -from .TelemetryBackendService import TelemetryBackendService +from common.Constants import ServiceNameEnum from common.tools.kafka.Variables import KafkaTopic +from .TelemetryBackendService import TelemetryBackendService + +from .collector_api.DriverFactory import DriverFactory +from .collector_api.DriverInstanceCache import DriverInstanceCache, preload_drivers +from .collectors import DRIVERS terminate = threading.Event() -LOGGER = None +LOGGER : logging.Logger = None def signal_handler(signal, frame): # pylint: disable=redefined-outer-name LOGGER.warning('Terminate signal received') @@ -43,9 +48,17 @@ def main(): metrics_port = get_metrics_port() start_http_server(metrics_port) - grpc_service = TelemetryBackendService() + # Initialize Driver framework + driver_factory = DriverFactory(DRIVERS) + driver_instance_cache = 
DriverInstanceCache(driver_factory) + + grpc_service = TelemetryBackendService(driver_instance_cache) grpc_service.start() + # Preload drivers + LOGGER.info('Preloading drivers...') + preload_drivers(driver_instance_cache) + # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=1.0): pass diff --git a/src/telemetry/backend/service/collectors/__init__.py b/src/telemetry/backend/service/collectors/__init__.py index ab707f5cad9730bff151efbb2b5f6a66feea932c..450d508187b81369b86618cbe538ff1344e7ed47 100644 --- a/src/telemetry/backend/service/collectors/__init__.py +++ b/src/telemetry/backend/service/collectors/__init__.py @@ -46,13 +46,13 @@ if LOAD_ALL_DEVICE_DRIVERS: } ])) -if LOAD_ALL_DEVICE_DRIVERS: - from .p4.p4_collector import P4Collector # pylint: disable=wrong-import-position - DRIVERS.append( - (P4Collector, [ - { - # Real P4 Switch, specifying P4 Collector => use P4Collector - FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, - FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_P4, - } - ])) +# if LOAD_ALL_DEVICE_DRIVERS: +# from .p4.p4_collector import P4Collector # pylint: disable=wrong-import-position +# DRIVERS.append( +# (P4Collector, [ +# { +# # Real P4 Switch, specifying P4 Collector => use P4Collector +# FilterFieldEnum.DEVICE_TYPE: DeviceTypeEnum.P4_SWITCH, +# FilterFieldEnum.DRIVER : DeviceDriverEnum.DEVICEDRIVER_P4, +# } +# ])) diff --git a/src/telemetry/backend/service/collectors/gnmi_openconfig/GnmiSessionHandler.py b/src/telemetry/backend/service/collectors/gnmi_openconfig/GnmiSessionHandler.py index 825462cfbebce444cd8114e9ac5ffbbf4c0f9de6..c8dbfbc5f9a59b8cb0ae51855ff24a90dbf6dd76 100644 --- a/src/telemetry/backend/service/collectors/gnmi_openconfig/GnmiSessionHandler.py +++ b/src/telemetry/backend/service/collectors/gnmi_openconfig/GnmiSessionHandler.py @@ -38,11 +38,11 @@ class GnmiSessionHandler: self._password = settings.get('password') self._use_tls = settings.get('use_tls', False) self._channel : 
Optional[grpc.Channel] = None - self._stub : Optional[gNMIStub] = None - self._yang_handler = YangHandler() - self._subscriptions = Subscriptions() + self._stub : Optional[gNMIStub] = None + self._yang_handler = YangHandler() + self._subscriptions = Subscriptions() self._in_subscriptions = queue.Queue() - self._out_samples = queue.Queue() + self._out_samples = queue.Queue() def __del__(self) -> None: self._logger.info('Destroying YangValidator...') diff --git a/src/telemetry/backend/tests/Fixtures.py b/src/telemetry/backend/tests/Fixtures.py index 59f1b761ca40caf2013471c7f6fdbaa781759a0a..c9704fbcf0fd05936b0c076aaad7bcdada3827e2 100644 --- a/src/telemetry/backend/tests/Fixtures.py +++ b/src/telemetry/backend/tests/Fixtures.py @@ -27,32 +27,37 @@ LOGGER.setLevel(logging.DEBUG) @pytest.fixture(scope='session') def context_client(): - _client = ContextClient(host="10.152.183.234") + _client = ContextClient(host="10.152.183.180") _client.connect() LOGGER.info('Yielding Connected ContextClient...') yield _client + LOGGER.info('Closing ContextClient...') _client.close() @pytest.fixture(scope='session') def device_client(): - _client = DeviceClient(host="10.152.183.95") + _client = DeviceClient(host="10.152.183.212") _client.connect() LOGGER.info('Yielding Connected DeviceClient...') yield _client + LOGGER.info('Closing DeviceClient...') _client.close() @pytest.fixture(scope='session') def service_client(): - _client = ServiceClient(host="10.152.183.47") + _client = ServiceClient(host="10.152.183.98") _client.connect() - LOGGER.info('Yielding Connected DeviceClient...') + LOGGER.info('Yielding Connected ServiceClient...') yield _client + LOGGER.info('Closing ServiceClient...') _client.close() @pytest.fixture(scope='session') def kpi_manager_client(): - _client = KpiManagerClient(host="10.152.183.118") + _client = KpiManagerClient(host="10.152.183.108") + _client.connect() LOGGER.info('Yielding Connected KpiManagerClient...') yield _client - _client.close() 
LOGGER.info('Closed KpiManagerClient...') + _client.close() + diff --git a/src/telemetry/backend/tests/gnmi_openconfig/__init__.py b/src/telemetry/backend/tests/gnmi_openconfig/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14 --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/telemetry/backend/tests/gnmi_openconfig/messages_gnmi-openconfig.py b/src/telemetry/backend/tests/gnmi_openconfig/messages_gnmi-openconfig.py new file mode 100644 index 0000000000000000000000000000000000000000..99ea4bfbb96099620b296bc46f0f0660170e0553 --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/messages_gnmi-openconfig.py @@ -0,0 +1,63 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +def _create_kpi_pkt_recevied( + descriptor_name : str = "Test_kpi_packet_received", + sample_type : KpiSampleType = KpiSampleType.KPISAMPLETYPE_PACKETS_RECEIVED, # type: ignore + device_uuid : uuid.UUID = uuid.UUID("a8695f53-ba2e-57bd-b586-edf2b5e054b1"), + endpoint_uuid : uuid.UUID = uuid.UUID("c51c2784-0377-5851-993b-46804c89cfc9") + ): + _create_kpi_descriptor( + descriptor_name = descriptor_name, + sample_type = sample_type, + device_uuid = device_uuid, + endpoint_uuid = endpoint_uuid + ) + +def _create_kpi_pkt_transmitted( + descriptor_name : str = "Test_kpi_packet_received", + sample_type : KpiSampleType = KpiSampleType.KPISAMPLETYPE_PACKETS_TRANSMITTED, # type: ignore + device_uuid : uuid.UUID = uuid.UUID("a8695f53-ba2e-57bd-b586-edf2b5e054b1"), + endpoint_uuid : uuid.UUID = uuid.UUID("c51c2784-0377-5851-993b-46804c89cfc9") + ): + _create_kpi_descriptor( + descriptor_name = descriptor_name, + sample_type = sample_type, + device_uuid = device_uuid, + endpoint_uuid = endpoint_uuid + ) + +def _create_kpi_descriptor( + descriptor_name : str, + sample_type : KpiSampleType, # type: ignore + device_uuid : uuid.UUID, + endpoint_uuid : uuid.UUID + ): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = str(uuid.uuid4()) + # _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" + # _create_kpi_request.kpi_id.kpi_id.uuid = "f974b6cc-095f-4767-b8c1-3457b383fb99" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = sample_type + _create_kpi_request.device_id.device_uuid.uuid = device_uuid + _create_kpi_request.service_id.service_uuid.uuid = 'SERVICE_ID_2' + _create_kpi_request.slice_id.slice_uuid.uuid = 'SLICE_1' + 
_create_kpi_request.endpoint_id.endpoint_uuid.uuid = endpoint_uuid + _create_kpi_request.connection_id.connection_uuid.uuid = 'CONN_3' + _create_kpi_request.link_id.link_uuid.uuid = 'LINK_5' + return _create_kpi_request diff --git a/src/telemetry/backend/tests/gnmi_openconfig/storage/Storage.py b/src/telemetry/backend/tests/gnmi_openconfig/storage/Storage.py new file mode 100644 index 0000000000000000000000000000000000000000..e42e81522483ce9c3ab5fc1b627c867529562dcd --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/storage/Storage.py @@ -0,0 +1,23 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from .StorageEndpoints import StorageEndpoints +from .StorageInterface import StorageInterface +from .StorageNetworkInstance import StorageNetworkInstance + +class Storage: + def __init__(self) -> None: + self.endpoints = StorageEndpoints() + self.interfaces = StorageInterface() + self.network_instances = StorageNetworkInstance() diff --git a/src/telemetry/backend/tests/gnmi_openconfig/storage/StorageEndpoints.py b/src/telemetry/backend/tests/gnmi_openconfig/storage/StorageEndpoints.py new file mode 100644 index 0000000000000000000000000000000000000000..ec8445a4d1169e6ebffa67d62e72c3fe5c5268fb --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/storage/StorageEndpoints.py @@ -0,0 +1,75 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import re +from typing import Dict, List, Tuple +from .Tools import compose_resources + +RE_RESKEY_ENDPOINT = re.compile(r'^\/endpoints\/endpoint\[([^\]]+)\]$') + +ENDPOINT_PACKET_SAMPLE_TYPES : Dict[int, str] = { + 101: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/out-pkts', + 102: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/in-pkts', + 201: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/out-octets', + 202: '/openconfig-interfaces:interfaces/interface[name={:s}]/state/counters/in-octets', +} + +class Endpoints: + STRUCT : List[Tuple[str, List[str]]] = [ + ('/endpoints/endpoint[{:s}]', ['uuid', 'type', 'sample_types']), + ] + + def __init__(self) -> None: + self._items : Dict[str, Dict] = dict() + + def add(self, ep_uuid : str, resource_value : Dict) -> None: + item = self._items.setdefault(ep_uuid, dict()) + item['uuid'] = ep_uuid + + for _, field_names in Endpoints.STRUCT: + field_names = set(field_names) + item.update({k:v for k,v in resource_value.items() if k in field_names}) + + item['sample_types'] = { + sample_type_id : sample_type_path.format(ep_uuid) + for sample_type_id, sample_type_path in ENDPOINT_PACKET_SAMPLE_TYPES.items() + } + + def get(self, ep_uuid : str) -> Dict: + return self._items.get(ep_uuid) + + def remove(self, ep_uuid : str) -> None: + self._items.pop(ep_uuid, None) + + def compose_resources(self) -> List[Dict]: + return compose_resources(self._items, Endpoints.STRUCT) + +class StorageEndpoints: + def __init__(self) -> None: + self.endpoints = Endpoints() + + def populate(self, resources : List[Tuple[str, Dict]]) -> None: + for resource_key, resource_value in resources: + match = RE_RESKEY_ENDPOINT.match(resource_key) + if match is not None: + self.endpoints.add(match.group(1), resource_value) + continue + + MSG = 'Unhandled Resource Key: {:s} => {:s}' + raise Exception(MSG.format(str(resource_key), str(resource_value))) + + def get_expected_config(self) -> 
List[Tuple[str, Dict]]: + expected_config = list() + expected_config.extend(self.endpoints.compose_resources()) + return expected_config diff --git a/src/telemetry/backend/tests/gnmi_openconfig/storage/StorageInterface copy.py b/src/telemetry/backend/tests/gnmi_openconfig/storage/StorageInterface copy.py new file mode 100644 index 0000000000000000000000000000000000000000..3385ccfa85edaddaf70803c235c1116c0ec3b93e --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/storage/StorageInterface copy.py @@ -0,0 +1,134 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import re
from typing import Dict, List, Tuple

# Regex fragments matching the resource keys handled by this module.
# Group 1 of PREFIX always captures the interface name.
PREFIX = r'^\/interface\[([^\]]+)\]'
RE_RESKEY_INTERFACE    = re.compile(PREFIX + r'$')
RE_RESKEY_ETHERNET     = re.compile(PREFIX + r'\/ethernet$')
RE_RESKEY_SUBINTERFACE = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]$')
#RE_RESKEY_IPV4_ADDRESS = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]\/ipv4\[([^\]]+)\]$')

class Interfaces:
    """Accumulates per-interface attributes keyed by interface name."""

    # (resource-key template, attribute names) pairs used to re-compose the
    # expected configuration from the accumulated items.
    STRUCT : List[Tuple[str, List[str]]] = [
        ('/interface[{:s}]', ['name', 'type', 'admin-status', 'oper-status', 'management', 'mtu', 'ifindex',
                              'hardware-port', 'transceiver']),
        ('/interface[{:s}]/ethernet', ['port-speed', 'negotiated-port-speed', 'mac-address', 'hw-mac-address']),
    ]

    def __init__(self) -> None:
        self._items : Dict[str, Dict] = dict()

    def add(self, if_name : str, resource_value : Dict) -> None:
        """Merge the STRUCT-known fields of resource_value into the entry for if_name."""
        item = self._items.setdefault(if_name, dict())
        item['name'] = if_name
        for _, field_names in Interfaces.STRUCT:
            field_names = set(field_names)
            item.update({k: v for k, v in resource_value.items() if k in field_names})

    def get(self, if_name : str) -> Dict:
        """Return the accumulated entry for if_name, or None if unknown."""
        return self._items.get(if_name)

    def remove(self, if_name : str) -> None:
        self._items.pop(if_name, None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        # Local import: defers the package-relative dependency to call time so
        # this module stays importable (and unit-testable) in isolation.
        from .Tools import compose_resources
        return compose_resources(self._items, Interfaces.STRUCT)

class SubInterfaces:
    """Accumulates subinterfaces keyed by (interface name, subinterface index)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/interface[{:s}]/subinterface[{:d}]', ['index']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, int], Dict] = dict()

    def add(self, if_name : str, subif_index : int) -> None:
        item = self._items.setdefault((if_name, subif_index), dict())
        item['index'] = subif_index

    def get(self, if_name : str, subif_index : int) -> Dict:
        return self._items.get((if_name, subif_index))

    def remove(self, if_name : str, subif_index : int) -> None:
        self._items.pop((if_name, subif_index), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see Interfaces.compose_resources
        return compose_resources(self._items, SubInterfaces.STRUCT)

class IPv4Addresses:
    """Accumulates IPv4 addresses keyed by (interface, subif index, address)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        # NOTE: the template only consumes the first two key fields; the extra
        # address field in the key is ignored by str.format (extra positional
        # arguments are allowed).
        ('/interface[{:s}]/subinterface[{:d}]', ['index', 'address_ip', 'address_prefix', 'origin']),
    ]

    def __init__(self) -> None:
        # BUGFIX: key by the full (if_name, subif_index, ipv4_address) triple.
        # Previously the key was (if_name, subif_index), so get()/remove()
        # silently ignored their ipv4_address argument and multiple addresses
        # on the same subinterface clobbered each other.
        self._items : Dict[Tuple[str, int, str], Dict] = dict()

    def add(self, if_name : str, subif_index : int, ipv4_address : str, resource_value : Dict) -> None:
        item = self._items.setdefault((if_name, subif_index, ipv4_address), dict())
        item['index'         ] = subif_index
        item['address_ip'    ] = ipv4_address
        item['origin'        ] = resource_value.get('origin')
        item['address_prefix'] = resource_value.get('prefix')

    def get(self, if_name : str, subif_index : int, ipv4_address : str) -> Dict:
        return self._items.get((if_name, subif_index, ipv4_address))

    def remove(self, if_name : str, subif_index : int, ipv4_address : str) -> None:
        self._items.pop((if_name, subif_index, ipv4_address), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see Interfaces.compose_resources
        return compose_resources(self._items, IPv4Addresses.STRUCT)

class StorageInterface:
    """Parses interface-related resource keys into per-kind storages and
    re-composes the expected configuration from them."""

    def __init__(self) -> None:
        self.interfaces     = Interfaces()
        self.subinterfaces  = SubInterfaces()
        self.ipv4_addresses = IPv4Addresses()

    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
        """Dispatch each (resource_key, resource_value) pair to the matching
        storage. Raises Exception on a key no pattern recognizes."""
        for resource_key, resource_value in resources:
            match = RE_RESKEY_INTERFACE.match(resource_key)
            if match is not None:
                self.interfaces.add(match.group(1), resource_value)
                continue

            match = RE_RESKEY_ETHERNET.match(resource_key)
            if match is not None:
                self.interfaces.add(match.group(1), resource_value)
                continue

            match = RE_RESKEY_SUBINTERFACE.match(resource_key)
            if match is not None:
                if_name, subif_index = match.group(1), int(match.group(2))
                self.subinterfaces.add(if_name, subif_index)
                address_ip = resource_value.get('address_ip')
                # BUGFIX: only record an IPv4 entry when the subinterface
                # actually reports an address; previously a phantom entry with
                # address_ip=None was stored for address-less subinterfaces.
                if address_ip is not None:
                    self.ipv4_addresses.add(if_name, subif_index, address_ip, resource_value)
                continue

            #match = RE_RESKEY_IPV4_ADDRESS.match(resource_key)
            #if match is not None:
            #    self.ipv4_addresses.add(match.group(1), int(match.group(2)), match.group(3), resource_value)
            #    continue

            MSG = 'Unhandled Resource Key: {:s} => {:s}'
            raise Exception(MSG.format(str(resource_key), str(resource_value)))

    def get_expected_config(self) -> List[Tuple[str, Dict]]:
        expected_config = list()
        expected_config.extend(self.interfaces.compose_resources())
        #expected_config.extend(self.subinterfaces.compose_resources())
        expected_config.extend(self.ipv4_addresses.compose_resources())
        return expected_config
import re
from typing import Dict, List, Tuple
from .Tools import compose_resources

# Resource-key patterns; group 1 of PREFIX is always the interface name.
PREFIX = r'^\/interface\[([^\]]+)\]'
RE_RESKEY_INTERFACE    = re.compile(PREFIX + r'$')
RE_RESKEY_ETHERNET     = re.compile(PREFIX + r'\/ethernet$')
RE_RESKEY_SUBINTERFACE = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]$')
RE_RESKEY_IPV4_ADDRESS = re.compile(PREFIX + r'\/subinterface\[([^\]]+)\]\/ipv4\[([^\]]+)\]$')

class Interfaces:
    """Expected-config storage for /interface[...] and /interface[...]/ethernet."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/interface[{:s}]', ['name', 'type', 'admin-status', 'oper-status', 'management', 'mtu', 'ifindex', 'description',
                              'hardware-port', 'transceiver']),
        ('/interface[{:s}]/ethernet', ['port-speed', 'negotiated-port-speed', 'mac-address', 'hw-mac-address']),
    ]

    def __init__(self) -> None:
        self._items : Dict[str, Dict] = dict()

    def add(self, if_name : str, resource_value : Dict) -> None:
        """Merge the STRUCT-known fields of resource_value into the entry for if_name."""
        entry = self._items.setdefault(if_name, dict())
        entry['name'] = if_name
        for _, known_fields in Interfaces.STRUCT:
            known = set(known_fields)
            for field, value in resource_value.items():
                if field in known:
                    entry[field] = value

    def get(self, if_name : str) -> Dict:
        return self._items.get(if_name)

    def remove(self, if_name : str) -> None:
        self._items.pop(if_name, None)

    def compose_resources(self) -> List[Dict]:
        return compose_resources(self._items, Interfaces.STRUCT)

class SubInterfaces:
    """Expected-config storage for /interface[...]/subinterface[...] entries."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/interface[{:s}]/subinterface[{:d}]', ['index', 'address_ip']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, int], Dict] = dict()

    def add(self, if_name : str, subif_index : int, resource_value : Dict = None) -> None:
        """Record a subinterface; copies address_ip when the resource reports one."""
        entry = self._items.setdefault((if_name, subif_index), dict())
        entry['index'] = subif_index
        if resource_value and 'address_ip' in resource_value:
            entry['address_ip'] = resource_value['address_ip']

    def get(self, if_name : str, subif_index : int) -> Dict:
        return self._items.get((if_name, subif_index))

    def remove(self, if_name : str, subif_index : int) -> None:
        self._items.pop((if_name, subif_index), None)

    def compose_resources(self) -> List[Dict]:
        return compose_resources(self._items, SubInterfaces.STRUCT)

class IPv4Addresses:
    """Expected-config storage for per-subinterface IPv4 addresses."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/interface[{:s}]/subinterface[{:d}]/ipv4[{:s}]', ['ip', 'origin', 'prefix']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, int, str], Dict] = dict()

    def add(self, if_name : str, subif_index : int, ipv4_address : str, resource_value : Dict) -> None:
        entry = self._items.setdefault((if_name, subif_index, ipv4_address), dict())
        entry.update({
            'ip'    : ipv4_address,
            'origin': resource_value.get('origin'),
            'prefix': resource_value.get('prefix'),
        })

    def get(self, if_name : str, subif_index : int, ipv4_address : str) -> Dict:
        return self._items.get((if_name, subif_index, ipv4_address))

    def remove(self, if_name : str, subif_index : int, ipv4_address : str) -> None:
        self._items.pop((if_name, subif_index, ipv4_address), None)

    def compose_resources(self) -> List[Dict]:
        return compose_resources(self._items, IPv4Addresses.STRUCT)

class StorageInterface:
    """Routes interface-related resource keys to the matching storage and
    re-composes the expected configuration from all of them."""

    def __init__(self) -> None:
        self.interfaces     = Interfaces()
        self.subinterfaces  = SubInterfaces()
        self.ipv4_addresses = IPv4Addresses()

    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
        """Dispatch each (resource_key, resource_value) pair; raises on unknown keys."""
        for resource_key, resource_value in resources:
            m = RE_RESKEY_INTERFACE.match(resource_key)
            if m:
                self.interfaces.add(m.group(1), resource_value)
                continue

            m = RE_RESKEY_ETHERNET.match(resource_key)
            if m:
                self.interfaces.add(m.group(1), resource_value)
                continue

            m = RE_RESKEY_SUBINTERFACE.match(resource_key)
            if m:
                self.subinterfaces.add(m.group(1), int(m.group(2)), resource_value)
                continue

            m = RE_RESKEY_IPV4_ADDRESS.match(resource_key)
            if m:
                self.ipv4_addresses.add(m.group(1), int(m.group(2)), m.group(3), resource_value)
                continue

            MSG = 'Unhandled Resource Key: {:s} => {:s}'
            raise Exception(MSG.format(str(resource_key), str(resource_value)))

    def get_expected_config(self) -> List[Tuple[str, Dict]]:
        expected : List[Tuple[str, Dict]] = []
        for storage in (self.interfaces, self.subinterfaces, self.ipv4_addresses):
            expected.extend(storage.compose_resources())
        return expected
import re
from typing import Dict, List, Tuple

# Resource-key patterns; group 1 of PREFIX is always the network-instance name.
PREFIX = r'^\/network\_instance\[([^\]]+)\]'
RE_RESKEY_NET_INST     = re.compile(PREFIX + r'$')
RE_RESKEY_INTERFACE    = re.compile(PREFIX + r'\/interface\[([^\]]+)\]$')
# NOTE: 'protocols?' deliberately accepts both .../protocol[...] and
# .../protocols[...] spellings of the resource key.
RE_RESKEY_PROTOCOL     = re.compile(PREFIX + r'\/protocols?\[([^\]]+)\]$')
RE_RESKEY_PROTO_STATIC = re.compile(PREFIX + r'\/protocol\[([^\]]+)\]\/static\_routes\[([^\]]+)\]$')
RE_RESKEY_TABLE        = re.compile(PREFIX + r'\/table\[([^\,]+)\,([^\]]+)\]$')
RE_RESKEY_VLAN         = re.compile(PREFIX + r'\/vlan\[([^\]]+)\]$')

class NetworkInstances:
    """Expected-config storage for /network_instance[...] entries."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]', ['name', 'type']),
    ]

    def __init__(self) -> None:
        self._items : Dict[str, Dict] = dict()

    def add(self, ni_name : str, resource_value : Dict) -> None:
        item = self._items.setdefault(ni_name, dict())
        item['name'] = ni_name
        item['type'] = resource_value.get('type')

    def get(self, ni_name : str) -> Dict:
        return self._items.get(ni_name)

    def remove(self, ni_name : str) -> None:
        self._items.pop(ni_name, None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        # Local import: defers the package-relative dependency to call time so
        # this module stays importable (and unit-testable) in isolation.
        from .Tools import compose_resources
        return compose_resources(self._items, NetworkInstances.STRUCT)

class Interfaces:
    """Per-network-instance interface bindings keyed by (ni, if_name, sif_index)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/interface[{:s}.{:d}]', ['name', 'id', 'if_name', 'sif_index']),
    ]

    def __init__(self) -> None:
        # BUGFIX: annotation matched the 2-field key of other storages, but
        # entries are actually keyed by the (ni, if_name, sif_index) triple.
        self._items : Dict[Tuple[str, str, int], Dict] = dict()

    def add(self, ni_name : str, if_name : str, sif_index : int) -> None:
        item = self._items.setdefault((ni_name, if_name, sif_index), dict())
        item['name'     ] = ni_name
        item['id'       ] = '{:s}.{:d}'.format(if_name, sif_index)
        item['if_name'  ] = if_name
        item['sif_index'] = sif_index

    def get(self, ni_name : str, if_name : str, sif_index : int) -> Dict:
        return self._items.get((ni_name, if_name, sif_index))

    def remove(self, ni_name : str, if_name : str, sif_index : int) -> None:
        self._items.pop((ni_name, if_name, sif_index), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see NetworkInstances
        return compose_resources(self._items, Interfaces.STRUCT)

class Protocols:
    """Protocols enabled per network instance, keyed by (ni, protocol)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/protocols[{:s}]', ['identifier', 'name', 'protocol_name']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, str], Dict] = dict()

    def add(self, ni_name : str, protocol : str) -> None:
        item = self._items.setdefault((ni_name, protocol), dict())
        item['identifier'   ] = protocol
        item['name'         ] = ni_name
        item['protocol_name'] = protocol

    def get(self, ni_name : str, protocol : str) -> Dict:
        return self._items.get((ni_name, protocol))

    def remove(self, ni_name : str, protocol : str) -> None:
        self._items.pop((ni_name, protocol), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see NetworkInstances
        return compose_resources(self._items, Protocols.STRUCT)

class StaticRoutes:
    """Static routes keyed by (ni, protocol, prefix)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/protocol[{:s}]/static_routes[{:s}]', ['prefix', 'next_hops']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, str, str], Dict] = dict()

    def add(self, ni_name : str, protocol : str, prefix : str, resource_value : Dict) -> None:
        item = self._items.setdefault((ni_name, protocol, prefix), dict())
        item['prefix'   ] = prefix
        item['next_hops'] = resource_value.get('next_hops')

    def get(self, ni_name : str, protocol : str, prefix : str) -> Dict:
        return self._items.get((ni_name, protocol, prefix))

    def remove(self, ni_name : str, protocol : str, prefix : str) -> None:
        self._items.pop((ni_name, protocol, prefix), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see NetworkInstances
        return compose_resources(self._items, StaticRoutes.STRUCT)

class Tables:
    """Routing tables keyed by (ni, protocol, address_family)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/table[{:s},{:s}]', ['protocol', 'address_family']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, str, str], Dict] = dict()

    def add(self, ni_name : str, protocol : str, address_family : str) -> None:
        item = self._items.setdefault((ni_name, protocol, address_family), dict())
        item['protocol'      ] = protocol
        item['address_family'] = address_family

    def get(self, ni_name : str, protocol : str, address_family : str) -> Dict:
        return self._items.get((ni_name, protocol, address_family))

    def remove(self, ni_name : str, protocol : str, address_family : str) -> None:
        self._items.pop((ni_name, protocol, address_family), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see NetworkInstances
        return compose_resources(self._items, Tables.STRUCT)

class Vlans:
    """VLANs keyed by (ni, vlan_id)."""

    STRUCT : List[Tuple[str, List[str]]] = [
        ('/network_instance[{:s}]/vlan[{:d}]', ['vlan_id', 'name', 'members']),
    ]

    def __init__(self) -> None:
        self._items : Dict[Tuple[str, int], Dict] = dict()

    def add(self, ni_name : str, vlan_id : int, resource_value : Dict) -> None:
        item = self._items.setdefault((ni_name, vlan_id), dict())
        item['vlan_id'] = vlan_id
        item['name'   ] = resource_value.get('name')
        # BUGFIX: sorted(None) raised TypeError when the resource carried no
        # 'members' field; default to an empty member list instead.
        item['members'] = sorted(resource_value.get('members') or [])

    def get(self, ni_name : str, vlan_id : int) -> Dict:
        return self._items.get((ni_name, vlan_id))

    def remove(self, ni_name : str, vlan_id : int) -> None:
        self._items.pop((ni_name, vlan_id), None)

    def compose_resources(self) -> List[Tuple[str, Dict]]:
        from .Tools import compose_resources  # deferred; see NetworkInstances
        return compose_resources(self._items, Vlans.STRUCT)

class StorageNetworkInstance:
    """Parses network-instance resource keys into per-kind storages and
    re-composes the expected configuration from them."""

    def __init__(self) -> None:
        self.network_instances = NetworkInstances()
        self.interfaces        = Interfaces()
        self.protocols         = Protocols()
        self.protocol_static   = StaticRoutes()
        self.tables            = Tables()
        self.vlans             = Vlans()

    def populate(self, resources : List[Tuple[str, Dict]]) -> None:
        """Dispatch each (resource_key, resource_value) pair to the matching
        storage. Raises Exception on a key no pattern recognizes."""
        for resource_key, resource_value in resources:
            match = RE_RESKEY_NET_INST.match(resource_key)
            if match is not None:
                self.network_instances.add(match.group(1), resource_value)
                continue

            match = RE_RESKEY_INTERFACE.match(resource_key)
            if match is not None:
                # Interface ids come as 'name' or 'name.subif'; a missing
                # subinterface index defaults to 0.
                if_id = match.group(2)
                if_id_parts = if_id.split('.')
                if_name = if_id_parts[0]
                sif_index = 0 if len(if_id_parts) == 1 else int(if_id_parts[1])
                self.interfaces.add(match.group(1), if_name, sif_index)
                continue

            match = RE_RESKEY_PROTOCOL.match(resource_key)
            if match is not None:
                self.protocols.add(match.group(1), match.group(2))
                continue

            match = RE_RESKEY_PROTO_STATIC.match(resource_key)
            if match is not None:
                self.protocol_static.add(match.group(1), match.group(2), match.group(3), resource_value)
                continue

            match = RE_RESKEY_TABLE.match(resource_key)
            if match is not None:
                self.tables.add(match.group(1), match.group(2), match.group(3))
                continue

            match = RE_RESKEY_VLAN.match(resource_key)
            if match is not None:
                self.vlans.add(match.group(1), int(match.group(2)), resource_value)
                continue

            MSG = 'Unhandled Resource Key: {:s} => {:s}'
            raise Exception(MSG.format(str(resource_key), str(resource_value)))

    def get_expected_config(self) -> List[Tuple[str, Dict]]:
        expected_config = list()
        expected_config.extend(self.network_instances.compose_resources())
        expected_config.extend(self.interfaces.compose_resources())
        expected_config.extend(self.protocols.compose_resources())
        expected_config.extend(self.protocol_static.compose_resources())
        expected_config.extend(self.tables.compose_resources())
        expected_config.extend(self.vlans.compose_resources())
        return expected_config
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, List, Tuple + +def compose_resources( + storage : Dict[Tuple, Dict], config_struct : List[Tuple[str, List[str]]] +) -> List[Dict]: + expected_config = list() + + for resource_key_fields, resource_value_data in storage.items(): + for resource_key_template, resource_key_field_names in config_struct: + if isinstance(resource_key_fields, (str, int, float, bool)): resource_key_fields = (resource_key_fields,) + resource_key = resource_key_template.format(*resource_key_fields) + resource_value = { + field_name : resource_value_data[field_name] + for field_name in resource_key_field_names + if field_name in resource_value_data and resource_value_data[field_name] is not None + } + expected_config.append((resource_key, resource_value)) + + return expected_config diff --git a/src/telemetry/backend/tests/gnmi_openconfig/storage/__init__.py b/src/telemetry/backend/tests/gnmi_openconfig/storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14 --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/storage/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/telemetry/backend/tests/gnmi_openconfig/test_gnmi_openconfig_collector.py b/src/telemetry/backend/tests/gnmi_openconfig/test_gnmi_openconfig_collector.py new file mode 100644 index 0000000000000000000000000000000000000000..c29798ccd8354479867dc50d17928048f00bd696 --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/test_gnmi_openconfig_collector.py @@ -0,0 +1,250 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Integration tests for the gNMI/OpenConfig telemetry collector.

These tests talk to a real gNMI device (see COLLECTOR_SETTING_* below) and to
the TeraFlowSDN context/device services; they are meant for a lab environment.
"""

from typing import Dict
import time
import pytest
import logging
from ..add_devices import load_topology
from common.tools.context_queries.Device import get_device, add_device_to_topology
from ..Fixtures import context_client, device_client, service_client, kpi_manager_client
from common.proto.context_pb2 import EndPointId, DeviceId, TopologyId, ContextId, Empty
# NOTE(review): import the collector through the same package root used by the
# other imports in this module (no 'src.' prefix); importing one module under
# two different names makes Python load two distinct copies of it.
from telemetry.backend.service.collectors.gnmi_openconfig.GnmiOpenConfigCollector import GnmiOpenConfigCollector
from .storage.Storage import Storage
from common.proto.kpi_manager_pb2 import KpiDescriptor
from device.service.monitoring.ResourceKeyMapper import ResourceKeyMapper

from telemetry.backend.service.collector_api._Collector import RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES
from .tools.manage_config import (
    check_config_endpoints, check_config_interfaces, check_config_network_instances, get_config
)
# from .messages_gnmi-openconfig import _create_kpi_pkt_recevied, _create_kpi_pkt_transmitted


LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)

@pytest.fixture(autouse=True)
def log_all_methods(request):
    """Log test start/end boundaries so device logs can be correlated per test."""
    LOGGER.info(f" >>>>> Starting test: {request.node.name} ")
    yield
    LOGGER.info(f" <<<<< Finished test: {request.node.name} ")

##### Collector FIXTURE #####

# Lab-specific address/credentials of the gNMI device under test.
COLLECTOR_SETTING_ADDRESS = '10.1.8.59'
# COLLECTOR_SETTING_ADDRESS = '10.1.1.86'
COLLECTOR_SETTING_PORT     = 6030
COLLECTOR_SETTING_USERNAME = 'admin'
COLLECTOR_SETTING_PASSWORD = 'admin'
COLLECTOR_SETTING_USE_TLS  = False

@pytest.fixture(scope='session')
def collector():
    """Session-scoped collector connected to the lab device; disconnects on teardown."""
    _collector = GnmiOpenConfigCollector(
        COLLECTOR_SETTING_ADDRESS,
        COLLECTOR_SETTING_PORT,
        secure   = False,
        username = COLLECTOR_SETTING_USERNAME,
        password = COLLECTOR_SETTING_PASSWORD,
        use_tls  = COLLECTOR_SETTING_USE_TLS
    )
    LOGGER.info("Yielding GnmiOpenConfigCollector instance ...")
    _collector.Connect()
    yield _collector
    time.sleep(1)
    _collector.Disconnect()
    LOGGER.info("Terminating GnmiOpenConfigCollector instance ...")

##### STORAGE FIXTURE #####

@pytest.fixture(scope='session')
def storage():
    """Session-scoped in-memory expected-config storage shared by the tests."""
    yield Storage()

##### NETWORK INSTANCE DETAILS #####

NETWORK_INSTANCES = [
    {
        'name': 'test-l3-svc',
        'type': 'L3VRF',
        'interfaces': [
            {'name': 'Ethernet1',  'index': 0, 'ipv4_addr': '192.168.1.1',  'ipv4_prefix': 24, 'enabled': True},
            {'name': 'Ethernet10', 'index': 0, 'ipv4_addr': '192.168.10.1', 'ipv4_prefix': 24, 'enabled': True},
        ],
        'static_routes': [
            {'prefix': '172.0.0.0/24', 'next_hop': '172.16.0.2', 'metric': 1},
            {'prefix': '172.2.0.0/24', 'next_hop': '172.16.0.3', 'metric': 1},
        ]
    }
]

##### TEST METHODS #####

# ---------- GET Methods TESTS ------------------------

# # Working Correctly
# def test_get_endpoints(
#     collector : GnmiOpenConfigCollector,  # pylint: disable=redefined-outer-name
#     storage : Storage                     # pylint: disable=redefined-outer-name
# ) -> None:
#     results_getconfig = get_config(collector, [RESOURCE_ENDPOINTS])
#     storage.endpoints.populate(results_getconfig)
#     check_config_endpoints(collector, storage)

# # Working Correctly
# def test_get_interfaces(
#     collector : GnmiOpenConfigCollector,  # pylint: disable=redefined-outer-name
#     storage : Storage,                    # pylint: disable=redefined-outer-name
# ) -> None:
#     results_getconfig = get_config(collector, [RESOURCE_INTERFACES])
#     storage.interfaces.populate(results_getconfig)
#     check_config_interfaces(collector, storage)

# # Working Correctly
# def test_get_network_instances(
#     collector : GnmiOpenConfigCollector,  # pylint: disable=redefined-outer-name
#     storage : Storage,                    # pylint: disable=redefined-outer-name
# ) -> None:
#     results_getconfig = get_config(collector, [RESOURCE_NETWORK_INSTANCES])
#     storage.network_instances.populate(results_getconfig)
#     check_config_network_instances(collector, storage)

# ---------- Subscribe Methods TESTS ------------------------
test_subscribe_endpoint( +# collector : GnmiOpenConfigCollector # pylint: disable=redefined-outer-name +# ): +# kpi_pkt_transmitted = _create_kpi_pkt_recevied() +# kpi_kpi_received = _create_kpi_pkt_transmitted() +# assert isinstance(kpi_pkt_transmitted, KpiDescriptor) +# assert isinstance(kpi_kpi_received, KpiDescriptor) + +# device_uuid = kpi_pkt_transmitted.device_id.device_uuid.uuid +# endpoint_uuid = kpi_pkt_transmitted.endpoint_id.endpoint_uuid.uuid +# kpi_sample_type = kpi_pkt_transmitted.kpi_sample_type +# LOGGER.info(f"Device ID: {device_uuid}") +# LOGGER.info(f"Endpoint ID: {endpoint_uuid}") +# LOGGER.info(f"KPI Sample Type ID: {kpi_sample_type}") + +# resource_key = "interfaces/interface/state/counters" +# sampling_duration = 60.0 # seconds +# sampling_interval = 5.0 # seconds + +# resources_to_subscribe = [(resource_key, sampling_duration, sampling_interval)] +# LOGGER.info(f"Resource to be subscribed: {resources_to_subscribe}") +# collector.SubscribeState(resources_to_subscribe) +# LOGGER.info(f"Resource is sucessfully subscribed: {resources_to_subscribe}") + +# ----- Add Topology ----- +# def test_add_to_topology(context_client, device_client, service_client): +# load_topology(context_client, device_client) + +# ---------------------------- +# DELETE CONTEXT and TOPOLOGY - START +# ---------------------------- +# ----- List Conetxts and topology ids ----- +# def test_list_contextIds(context_client, device_client): +# context_topology = [] +# empty = Empty() +# response = context_client.ListContexts(empty) +# for context in response.contexts: +# # LOGGER.info(f"Context ID: {context.context_id.context_uuid.uuid}") +# for topology in context.topology_ids: +# LOGGER.info(f"Topology ID: {topology.topology_uuid.uuid}") +# context_topology.append((str(context.context_id.context_uuid.uuid), str(topology.topology_uuid.uuid))) +# assert response + +# # # ----- Delete Device by Id ----- +# empty = Empty() +# response = context_client.ListDeviceIds(empty) +# for 
device_id in response.device_ids: +# # response = get_device(context_client = context_client, device_uuid = device.device_uuid.uuid, include_config_rules = False, include_components = False) +# # if response: +# # # LOGGER.info(f"Device type: {response.device_type}") +# # for endpoint in response.device_endpoints: +# # LOGGER.info(f"Endpoint: {endpoint}") +# # # if endpoint.endpoint_id.endpoint_uuid.uuid +# response = device_client.DeleteDevice(device_id) +# assert response == empty + +# # ----- Delete Topology ----- +# for contexts in context_topology: +# _context_id, _topology_id = contexts +# LOGGER.info(f"Deleting Topology: {_topology_id} from Context: {_context_id}") +# context_id = ContextId() +# context_id.context_uuid.uuid = _context_id + +# topology_id = TopologyId() +# topology_id.topology_uuid.uuid = _topology_id +# topology_id.context_id.CopyFrom(context_id) + +# response = context_client.RemoveTopology(topology_id) +# LOGGER.info(f"Deleted Topology: {topology_id}") +# assert response + +# # ----- Delete Context ----- +# for context in context_topology: +# _context_id, _ = context + +# context_id = ContextId() +# context_id.context_uuid.uuid = _context_id + +# response = context_client.RemoveContext(context_id) +# LOGGER.info(f"Deleted Context: {context_id}") +# assert response + +# ---------------------------- +# DELETE CONTEXT and TOPOLOGY - END +# ---------------------------- + + +# # ----------------------------- +# # GET ENDPOINTS - START +# # ----------------------------- + +# # ----- Get endpoint detail using device ID ----- +# def test_get_device_details(context_client): +# empty = Empty() +# response = context_client.ListDeviceIds(empty) +# for device in response.device_ids: +# response = get_device(context_client = context_client, device_uuid = device.device_uuid.uuid, include_config_rules = False, include_components = False) +# # response = get_device(context_client = context_client, device_uuid = "1290fb71-bf15-5528-8b69-2d2fabe1fa18", 
include_config_rules = False, include_components = False) +# if response: +# LOGGER.info(f"Device type: {response.device_type}") +# for endpoint in response.device_endpoints: +# LOGGER.info(f"Endpoint: {endpoint}") +# if endpoint.endpoint_id.endpoint_uuid.uuid == '36571df2-bac1-5909-a27d-5f42491d2ff0': +# endpoint_dict = {} +# kpi_sample_types = [] +# # LOGGER.info(f"Endpoint: {endpoint}") +# # LOGGER.info(f"Enpoint_uuid: {endpoint.endpoint_id.endpoint_uuid.uuid}") +# endpoint_dict["uuid"] = endpoint.endpoint_id.endpoint_uuid.uuid +# # LOGGER.info(f"Enpoint_name: {endpoint.name}") +# endpoint_dict["name"] = endpoint.name +# # LOGGER.info(f"Enpoint_type: {endpoint.endpoint_type}") +# endpoint_dict["type"] = endpoint.endpoint_type +# for sample_type in endpoint.kpi_sample_types: +# # LOGGER.info(f"Enpoint_sample_types: {sample_type}") +# kpi_sample_types.append(sample_type) +# endpoint_dict["sample_types"] = kpi_sample_types +# LOGGER.info(f"Extracted endpoint dict: {endpoint_dict}") +# else: +# LOGGER.info(f"Endpoint not matched") +# LOGGER.info(f"Device Type: {type(response)}") +# assert response is not None + +# ---------------------------------------------- diff --git a/src/telemetry/backend/tests/gnmi_openconfig/tools/__init__.py b/src/telemetry/backend/tests/gnmi_openconfig/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53d5157f750bfb085125cbd33faff1cec5924e14 --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/tools/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/src/telemetry/backend/tests/gnmi_openconfig/tools/check_updates.py b/src/telemetry/backend/tests/gnmi_openconfig/tools/check_updates.py new file mode 100644 index 0000000000000000000000000000000000000000..d3cb2ac3e50f662a0b16d356e1df60ba5644ff80 --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/tools/check_updates.py @@ -0,0 +1,22 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from typing import Iterable, List, Tuple + +def check_updates(results : Iterable[Tuple[str, bool]], format_str : str, item_ids : List[Tuple]) -> None: + results = set(results) + assert len(results) == len(item_ids) + for item_id_fields in item_ids: + if isinstance(item_id_fields, (str, int, float, bool)): item_id_fields = (item_id_fields,) + assert (format_str.format(*item_id_fields), True) in results diff --git a/src/telemetry/backend/tests/gnmi_openconfig/tools/manage_config.py b/src/telemetry/backend/tests/gnmi_openconfig/tools/manage_config.py new file mode 100644 index 0000000000000000000000000000000000000000..041a837219a3f304f40e4debdaebde6f582023aa --- /dev/null +++ b/src/telemetry/backend/tests/gnmi_openconfig/tools/manage_config.py @@ -0,0 +1,89 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import copy
import logging
import time

import deepdiff

from typing import Callable, Dict, List, Tuple, Union
from telemetry.backend.service.collector_api._Collector import (
    RESOURCE_ENDPOINTS, RESOURCE_INTERFACES, RESOURCE_NETWORK_INSTANCES,
    RESOURCE_ROUTING_POLICIES, RESOURCE_SERVICES
)
# from device.service.drivers.gnmi_openconfig.GnmiOpenConfigDriver import GnmiOpenConfigDriver
# NOTE(review): import the collector through the same package root as the other
# imports in this module (no 'src.' prefix) so it is not loaded twice under
# two different module names.
from telemetry.backend.service.collectors.gnmi_openconfig.GnmiOpenConfigCollector import GnmiOpenConfigCollector

from telemetry.backend.tests.gnmi_openconfig.storage.Storage import Storage
from .result_config_adapters import adapt_endpoint, adapt_interface, adapt_network_instance

LOGGER = logging.getLogger(__name__)

def get_config(collector : GnmiOpenConfigCollector, resources_to_get : List[str]) -> List[Tuple[str, Dict]]:
    """Fetch and log the (resource_key, resource_value) pairs for the given resources."""
    LOGGER.info('[get_config] resources_to_get = {:s}'.format(str(resources_to_get)))
    results_getconfig = collector.GetConfig(resources_to_get)
    LOGGER.info('[get_config] results_getconfig = {:s}'.format(str(results_getconfig)))
    return results_getconfig

def check_expected_config(
    collector : GnmiOpenConfigCollector, resources_to_get : List[str], expected_config : List[Dict],
    func_adapt_returned_config : Callable[[str, Dict], Tuple[str, Dict]] = lambda k, v: (k, v),
    max_retries : int = 1, retry_delay : float = 0.5
) -> List[Dict]:
    """Poll the device until its config (after adaptation) matches *expected_config*.

    :param func_adapt_returned_config: called as f(resource_key, resource_value)
        for each returned pair. BUGFIX: the previous default, ``lambda x: x``,
        took a single argument and raised TypeError whenever the caller did not
        supply an explicit adapter.
    :param max_retries: number of polls before failing (at least one is made).
    :param retry_delay: seconds to wait between polls.
    :return: deep copy of the last raw (unadapted) device answer.
    :raises AssertionError: if differences remain after the final attempt.
    """
    LOGGER.info('expected_config = {:s}'.format(str(expected_config)))

    return_data = None
    num_diffs   = 0
    diff_data   = None
    attempts = max(1, max_retries)  # always query the device at least once
    for attempt in range(attempts):
        results_getconfig = get_config(collector, resources_to_get)
        return_data = copy.deepcopy(results_getconfig)

        adapted = [
            func_adapt_returned_config(resource_key, resource_value)
            for resource_key, resource_value in results_getconfig
        ]

        diff_data = deepdiff.DeepDiff(sorted(expected_config), sorted(adapted))
        num_diffs = len(diff_data)
        if num_diffs == 0: break
        # let the device take some time to reconfigure, but do not sleep
        # after the final attempt
        if attempt + 1 < attempts:
            time.sleep(retry_delay)

    if num_diffs > 0: LOGGER.error('Differences[{:d}]:\n{:s}'.format(num_diffs, str(diff_data.pretty())))
    assert num_diffs == 0
    return return_data

def check_config_endpoints(
    collector : GnmiOpenConfigCollector, storage : Storage,
    max_retries : int = 1, retry_delay : float = 0.5
) -> List[Dict]:
    """Verify the device's endpoint config against the stored expectation."""
    return check_expected_config(
        collector, [RESOURCE_ENDPOINTS], storage.endpoints.get_expected_config(),
        adapt_endpoint, max_retries=max_retries, retry_delay=retry_delay
    )

def check_config_interfaces(
    collector : GnmiOpenConfigCollector, storage : Storage,
    max_retries : int = 1, retry_delay : float = 0.5
) -> List[Dict]:
    """Verify the device's interface config against the stored expectation."""
    return check_expected_config(
        collector, [RESOURCE_INTERFACES], storage.interfaces.get_expected_config(),
        adapt_interface, max_retries=max_retries, retry_delay=retry_delay
    )

def check_config_network_instances(
    collector : GnmiOpenConfigCollector, storage : Storage,
    max_retries : int = 1, retry_delay : float = 0.5
) -> List[Dict]:
    """Verify the device's network-instance config against the stored expectation."""
    return check_expected_config(
        collector, [RESOURCE_NETWORK_INSTANCES], storage.network_instances.get_expected_config(),
        adapt_network_instance, max_retries=max_retries, retry_delay=retry_delay
    )
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Dict, Tuple + +def interface(if_name, sif_index, ipv4_address, ipv4_prefix, enabled) -> Tuple[str, Dict]: + str_path = '/interface[{:s}]'.format(if_name) + str_data = { + 'name': if_name, 'enabled': enabled, 'sub_if_index': sif_index, 'sub_if_enabled': enabled, + 'sub_if_ipv4_enabled': enabled, 'sub_if_ipv4_address': ipv4_address, 'sub_if_ipv4_prefix': ipv4_prefix + } + return str_path, str_data + +def network_instance(ni_name, ni_type) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]'.format(ni_name) + str_data = { + 'name': ni_name, 'type': ni_type + } + return str_path, str_data + +def network_instance_static_route(ni_name, prefix, next_hop_index, next_hop, metric=1) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/static_route[{:s}]'.format(ni_name, prefix) + str_data = { + 'name': ni_name, 'prefix': prefix, 'next_hop_index': next_hop_index, 'next_hop': next_hop, 'metric': metric + } + return str_path, str_data + +def network_instance_interface(ni_name, if_name, sif_index) -> Tuple[str, Dict]: + str_path = '/network_instance[{:s}]/interface[{:s}.{:d}]'.format(ni_name, if_name, sif_index) + str_data = { + 'name': ni_name, 'if_name': if_name, 'sif_index': sif_index + } + return str_path, str_data diff --git a/src/telemetry/backend/tests/gnmi_openconfig/tools/result_config_adapters.py b/src/telemetry/backend/tests/gnmi_openconfig/tools/result_config_adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..9db5382ecc7d8bf58d5f48c43e49a9ace19a1222 --- /dev/null +++ 
b/src/telemetry/backend/tests/gnmi_openconfig/tools/result_config_adapters.py @@ -0,0 +1,29 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from typing import Dict, Tuple + +def adapt_endpoint(resource_key : str, resource_value : Dict) -> Tuple[str, Dict]: + return resource_key, resource_value + +def adapt_interface(resource_key : str, resource_value : Dict) -> Tuple[str, Dict]: + return resource_key, resource_value + +def adapt_network_instance(resource_key : str, resource_value : Dict) -> Tuple[str, Dict]: + match = re.match(r'^\/network\_instance\[([^\]]+)\]\/vlan\[([^\]]+)\]$', resource_key) + if match is not None: + members = resource_value.get('members') + if len(members) > 0: resource_value['members'] = sorted(members) + return resource_key, resource_value diff --git a/src/telemetry/backend/tests/test_backend.py b/src/telemetry/backend/tests/test_backend.py index 1329aa969a4fed5baa887dd12d120f41eb56c2fa..820aff8c4ca0b9d3e66a390306cffb198f39ec96 100644 --- a/src/telemetry/backend/tests/test_backend.py +++ b/src/telemetry/backend/tests/test_backend.py @@ -22,8 +22,7 @@ from .add_devices import load_topology from common.tools.context_queries.Topology import get_topology from common.Constants import DEFAULT_CONTEXT_NAME from common.tools.context_queries.Device import get_device, add_device_to_topology -# from common.tools.context_queries.EndPoint import get_endpoint_names 
-from .EndPoint import get_endpoint_names # modofied version of get_endpoint_names +from common.tools.context_queries.EndPoint import get_endpoint_names from common.proto.context_pb2 import EndPointId, DeviceId, TopologyId, ContextId , Empty from common.proto.kpi_manager_pb2 import KpiId diff --git a/src/telemetry/backend/tests/test_emulated.py b/src/telemetry/backend/tests/test_emulated.py index feb5b1f7f92de4016f3bcb8eff8e17b843bf0c3e..7b40d6b878e45626049e564feb5c6178711671e7 100644 --- a/src/telemetry/backend/tests/test_emulated.py +++ b/src/telemetry/backend/tests/test_emulated.py @@ -15,7 +15,7 @@ import logging import time import pytest -from telemetry.backend.collectors.emulated.EmulatedCollector import EmulatedCollector +from telemetry.backend.service.collectors.emulated.EmulatedCollector import EmulatedCollector from telemetry.backend.tests.messages_emulated import ( create_test_configuration, create_specific_config_keys, diff --git a/src/telemetry/backend/tests/test_gnmi_collector.py b/src/telemetry/backend/tests/test_gnmi_collector.py deleted file mode 100644 index 3c7a6a85a104a2b1bcd8b8d82bb469c63adfbec9..0000000000000000000000000000000000000000 --- a/src/telemetry/backend/tests/test_gnmi_collector.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import pytest -import logging -from unittest.mock import patch, MagicMock -from src.telemetry.backend.collectors.gnmi_openconfig.GnmiOpenConfigCollector import GnmiOpenConfigCollector - -LOGGER = logging.getLogger(__name__) -LOGGER.setLevel(logging.DEBUG) - -@pytest.fixture(autouse=True) -def log_all_methods(request): - LOGGER.info(f" >>>>> Starting test: {request.node.name} ") - yield - LOGGER.info(f" <<<<< Finished test: {request.node.name} ") - -@pytest.fixture() -def gnmi_openconfig_driver(): - collector = GnmiOpenConfigCollector( - "192.168.1.1", - 57400, - secure = True, - username = "admin", - password = "secret", - use_tls = False - ) - LOGGER.info("Yielding GnmiOpenConfigCollector collector instance ...") - yield collector - LOGGER.info("Terminating GnmiOpenConfigCollector collector instance ...") - -# def test_init(gnmi_openconfig_driver): -# # Test initializing the driver with all parameters -# driver = gnmi_openconfig_driver -# assert driver.address == "192.168.1.1" -# assert driver.port == 57400 - - - diff --git a/src/telemetry/backend/tests/topology.json b/src/telemetry/backend/tests/topology.json deleted file mode 100644 index 6416130b924441e959fcdb7001b7c1b51df172d8..0000000000000000000000000000000000000000 --- a/src/telemetry/backend/tests/topology.json +++ /dev/null @@ -1,148 +0,0 @@ -{ - "contexts": [ - {"context_id": {"context_uuid": {"uuid": "admin"}}} - ], - "topologies": [ - {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}} - ], - "devices": [ - { - "device_id": {"device_uuid": {"uuid": "DE1"}}, "device_type": "emu-packet-router", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", 
"resource_value": {"endpoints": [ - {"sample_types": [101, 102], "type": "copper/internal", "uuid": "1/1"}, - {"sample_types": [103, 102], "type": "copper/internal", "uuid": "1/2"}, - {"sample_types": [201, 202], "type": "copper/internal", "uuid": "2/1"}, - {"sample_types": [202, 203], "type": "copper/internal", "uuid": "2/2"}, - {"sample_types": [201, 203], "type": "copper/internal", "uuid": "2/3"}, - {"sample_types": [101, 103], "type": "copper/internal", "uuid": "2/4"} - ]}}} - ]} - }, - { - "device_id": {"device_uuid": {"uuid": "DE2"}}, "device_type": "emu-packet-router", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [101, 103], "type": "copper/internal", "uuid": "1/1"}, - {"sample_types": [103, 101], "type": "copper/internal", "uuid": "1/2"}, - {"sample_types": [202, 201], "type": "copper/internal", "uuid": "2/1"}, - {"sample_types": [203, 201], "type": "copper/internal", "uuid": "2/2"}, - {"sample_types": [203, 202], "type": "copper/internal", "uuid": "2/3"}, - {"sample_types": [102 ], "type": "copper/internal", "uuid": "2/4"} - ]}}} - ]} - }, - { - "device_id": {"device_uuid": {"uuid": "DE3"}}, "device_type": "emu-packet-router", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "copper/internal", "uuid": "1/1"}, - 
{"sample_types": [], "type": "copper/internal", "uuid": "1/2"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/1"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/2"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/3"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/4"} - ]}}} - ]} - }, - { - "device_id": {"device_uuid": {"uuid": "DE4"}}, "device_type": "emu-packet-router", "device_drivers": [0], - "device_endpoints": [], "device_operational_status": 0, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, - {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, - {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"sample_types": [], "type": "copper/internal", "uuid": "1/1"}, - {"sample_types": [], "type": "copper/internal", "uuid": "1/2"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/1"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/2"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/3"}, - {"sample_types": [], "type": "copper/internal", "uuid": "2/4"} - ]}}} - ]} - } - ], - "links": [ - - { - "link_id": {"link_uuid": {"uuid": "DE1/2/2==DE2/2/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE1"}}, "endpoint_uuid": {"uuid": "2/2"}}, - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/1"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE1/2/3==DE3/2/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE1"}}, "endpoint_uuid": {"uuid": "2/3"}}, - {"device_id": {"device_uuid": {"uuid": "DE3"}}, "endpoint_uuid": {"uuid": "2/1"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE1/2/4==DE4/2/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE1"}}, "endpoint_uuid": {"uuid": "2/4"}}, - {"device_id": {"device_uuid": {"uuid": 
"DE4"}}, "endpoint_uuid": {"uuid": "2/1"}} - ] - }, - - { - "link_id": {"link_uuid": {"uuid": "DE2/2/1==DE1/2/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/1"}}, - {"device_id": {"device_uuid": {"uuid": "DE1"}}, "endpoint_uuid": {"uuid": "2/2"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE2/2/3==DE3/2/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/3"}}, - {"device_id": {"device_uuid": {"uuid": "DE3"}}, "endpoint_uuid": {"uuid": "2/2"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE2/2/4==DE4/2/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/4"}}, - {"device_id": {"device_uuid": {"uuid": "DE4"}}, "endpoint_uuid": {"uuid": "2/2"}} - ] - }, - - { - "link_id": {"link_uuid": {"uuid": "DE3/2/1==DE1/2/3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE3"}}, "endpoint_uuid": {"uuid": "2/1"}}, - {"device_id": {"device_uuid": {"uuid": "DE1"}}, "endpoint_uuid": {"uuid": "2/3"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE3/2/2==DE2/2/3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE3"}}, "endpoint_uuid": {"uuid": "2/2"}}, - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/3"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE4/2/2==DE2/2/4"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE4"}}, "endpoint_uuid": {"uuid": "2/2"}}, - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/4"}} - ] - }, - - { - "link_id": {"link_uuid": {"uuid": "DE4/2/1==DE1/2/4"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE4"}}, "endpoint_uuid": {"uuid": "2/1"}}, - {"device_id": {"device_uuid": {"uuid": "DE1"}}, "endpoint_uuid": {"uuid": "2/4"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE4/2/2==DE2/2/4"}}, "link_endpoint_ids": [ - {"device_id": 
{"device_uuid": {"uuid": "DE4"}}, "endpoint_uuid": {"uuid": "2/2"}}, - {"device_id": {"device_uuid": {"uuid": "DE2"}}, "endpoint_uuid": {"uuid": "2/4"}} - ] - }, - { - "link_id": {"link_uuid": {"uuid": "DE4/2/3==DE3/2/4"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "DE4"}}, "endpoint_uuid": {"uuid": "2/3"}}, - {"device_id": {"device_uuid": {"uuid": "DE3"}}, "endpoint_uuid": {"uuid": "2/4"}} - ] - } - ] -}