diff --git a/.gitignore b/.gitignore index d5af4f7f61348537a2d01f9ee356f9cfb1e19c34..1b9e692a37af46fdb04e318d79eb08ac8e6e6eb5 100644 --- a/.gitignore +++ b/.gitignore @@ -177,6 +177,8 @@ cython_debug/ # Sqlite *.db +#temp files to test telemetry +src/telemetry/backend/tempFiles/ # TeraFlowSDN-generated files tfs_runtime_env_vars.sh tfs_runtime_env_vars*.sh diff --git a/proto/kpi_sample_types.proto b/proto/kpi_sample_types.proto index 5fcda6df95c6797b4ae03ca72ec78d114e965cbd..a8e25e809c80ce06b2939c8030b96c154a222de3 100644 --- a/proto/kpi_sample_types.proto +++ b/proto/kpi_sample_types.proto @@ -31,6 +31,8 @@ enum KpiSampleType { KPISAMPLETYPE_ML_CONFIDENCE = 401; //. can be used by both optical and L3 without any issue KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; //. can be used by both optical and L3 without any issue + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; KPISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS = 602; diff --git a/scripts/run_mon_test.sh b/scripts/run_mon_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..874e6bcda018811eb4ca0be8176b5e48165b41fe --- /dev/null +++ b/scripts/run_mon_test.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +PROJECTDIR=`pwd` + +cd $PROJECTDIR/src +#RCFILE=$PROJECTDIR/coverage/.coveragerc + +export KFK_SERVER_ADDRESS='127.0.0.1:9092' + +CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" + +python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ + service/service/monitoring.py diff --git a/scripts/run_tests_locally-analytics-backend.sh b/scripts/run_tests_locally-analytics-backend.sh index 4688942713d42dd23481af2388277a685d187f32..fae768939d1f476815bda56acdf8a765f604be1d 100755 --- a/scripts/run_tests_locally-analytics-backend.sh +++ b/scripts/run_tests_locally-analytics-backend.sh @@ -24,5 +24,8 @@ export KFK_SERVER_ADDRESS='127.0.0.1:9092' CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o jsonpath='{.spec.clusterIP}') export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" -python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ - analytics/backend/tests/test_backend.py +# python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ +# analytics/backend/tests/test_backend.py + +python3 -m pytest --log-level=DEBUG --log-cli-level=DEBUG --verbose \ + analytics/backend/tests/test_backend.py::test_start_analytics_backend_for_mgon_agent diff --git a/scripts/run_tests_locally-analytics-frontend.sh b/scripts/run_tests_locally-analytics-frontend.sh index 3d9fcd2904bd15f16860f9c3cdc97638806d8c48..4a90ec0600ced8ddc336e8f9435f8023910a9d34 100755 --- a/scripts/run_tests_locally-analytics-frontend.sh +++ b/scripts/run_tests_locally-analytics-frontend.sh @@ -24,4 +24,6 @@ CRDB_SQL_ADDRESS=$(kubectl get service cockroachdb-public --namespace crdb -o js export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_analytics?sslmode=require" python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ - 
analytics/frontend/tests/test_frontend.py + analytics/frontend/tests/test_frontend.py::test_StartAnalyzer_MGON_Agent +# python3 -m pytest --log-level=DEBUG --log-cli-level=INFO --verbose \ +# analytics/frontend/tests/test_frontend.py diff --git a/scripts/run_tests_locally-telemetry-gnmi.sh b/scripts/run_tests_locally-telemetry-gnmi.sh index a3a5f2b9d61c3b36b13d5703661e3198b83b85bc..6825e42a51bfe414d37e4031d9f17438d92ffe89 100755 --- a/scripts/run_tests_locally-telemetry-gnmi.sh +++ b/scripts/run_tests_locally-telemetry-gnmi.sh @@ -17,14 +17,14 @@ PROJECTDIR=`pwd` cd $PROJECTDIR/src RCFILE=$PROJECTDIR/coverage/.coveragerc -export KFK_SERVER_ADDRESS='127.0.0.1:9094' +export KFK_SERVER_ADDRESS='127.0.0.1:9092' # This is unit test (should be tested with container-lab running) python3 -m pytest --log-level=info --log-cli-level=info --verbose \ - telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py + telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py::test_full_workflow # This is integration test (should be tested with container-lab running) -python3 -m pytest --log-level=info --log-cli-level=info --verbose \ - telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py +# python3 -m pytest --log-level=info --log-cli-level=info --verbose \ +# telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py echo "Bye!" 
diff --git a/src/analytics/backend/service/Streamer.py b/src/analytics/backend/service/Streamer.py index 91d89504f52598f89ac77516f6a007f838eefe98..a8c9bffc6a3c918573ac9a1cbbcb274d0276ba8b 100644 --- a/src/analytics/backend/service/Streamer.py +++ b/src/analytics/backend/service/Streamer.py @@ -25,13 +25,14 @@ from analytics.backend.service.AnalyzerHelper import AnalyzerHelper logger = logging.getLogger(__name__) class DaskStreamer(threading.Thread): - def __init__(self, key, input_kpis, output_kpis, thresholds, - batch_size = 5, - batch_duration = None, - window_size = None, - cluster_instance = None, - producer_instance = AnalyzerHelper.initialize_kafka_producer() - ): + def __init__( + self, key, input_kpis, output_kpis, thresholds, + batch_size = 5, + batch_duration = None, + window_size = None, + cluster_instance = None, + producer_instance = AnalyzerHelper.initialize_kafka_producer() + ) -> None: super().__init__() self.key = key self.input_kpis = input_kpis diff --git a/src/analytics/backend/tests/messages_analyzer.py b/src/analytics/backend/tests/messages_analyzer.py index bed594300be8f3f97047bb6d685ae7afa3f1dfec..963cf37e921e52a679000692728507bf756f89b2 100644 --- a/src/analytics/backend/tests/messages_analyzer.py +++ b/src/analytics/backend/tests/messages_analyzer.py @@ -15,6 +15,25 @@ import pandas as pd from analytics.backend.service.AnalyzerHandlers import Handlers +def create_analysis_request_message_for_mgon_agent(): + return { + "request_id": "test_request_mgon_001", + "oper_mode": "test_mode", + "input_kpi_list": ["6e22f180-ba28-4641-b190-2287bf448888"], + "output_kpi_list": ["6e22f180-ba28-4641-b190-2287bf181818"], + "task_type": Handlers.AGGREGATION_HANDLER.value, + "task_parameter": { + "avg": [-5, -20], + }, + "duration": 90, + "batch_duration": 20, + "window_size": None, + "batch_size": 5, + "interval": 5, + } + + + def get_input_kpi_list(): return ["1e22f180-ba28-4641-b190-2287bf446666", "6e22f180-ba28-4641-b190-2287bf448888", 'kpi_3'] diff 
--git a/src/analytics/frontend/tests/messages.py b/src/analytics/frontend/tests/messages.py index 4dc5b96b86f50ffd99bca40a4174f38b54f50832..563de0592e6bf33fdd3966bc424dbe119a4cc970 100644 --- a/src/analytics/frontend/tests/messages.py +++ b/src/analytics/frontend/tests/messages.py @@ -18,6 +18,8 @@ from common.proto.kpi_manager_pb2 import KpiId from common.proto.analytics_frontend_pb2 import ( AnalyzerOperationMode, AnalyzerId, Analyzer, AnalyzerFilter ) +# function to create analyzer based on + def create_analyzer_id(): _create_analyzer_id = AnalyzerId() _create_analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) @@ -119,6 +121,58 @@ def create_analyzer_filter(): return _create_analyzer_filter +def create_analyzer_for_mgon_agent(): + """ + Create analyzer for MGON agent test with aggregation handler. + Returns: + Analyzer: Configured analyzer for MGON agent testing + """ + _create_analyzer = Analyzer() + + # Set analyzer ID + _create_analyzer.analyzer_id.analyzer_id.uuid = str(uuid.uuid4()) + + # Set algorithm name and operation mode + _create_analyzer.algorithm_name = "Test_MGON_Aggregation" + _create_analyzer.operation_mode = AnalyzerOperationMode.ANALYZEROPERATIONMODE_STREAMING + + # Input KPI ID + _input_kpi_id = KpiId() + _input_kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf448888" + _create_analyzer.input_kpi_ids.append(_input_kpi_id) + + # Output KPI ID + _output_kpi_id = KpiId() + _output_kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf181818" + _create_analyzer.output_kpi_ids.append(_output_kpi_id) + + # Task parameters - aggregation with average threshold + _threshold_dict = { + "task_type": Handlers.AGGREGATION_HANDLER.value, + "task_parameter": { + "avg": [-5, -10], + }, + } + + _create_analyzer.parameters['thresholds'] = json.dumps(_threshold_dict) + _create_analyzer.parameters['window_size'] = "0" # No sliding window + _create_analyzer.parameters['window_slider'] = "0" + _create_analyzer.parameters['store_aggregate'] = str(False) + 
_create_analyzer.parameters['interval'] = "5" # Interval for batch processing + + # Duration of the analyzer + _create_analyzer.duration_s = 900 + + # Batch duration settings + _create_analyzer.batch_min_duration_s = 20 + _create_analyzer.batch_max_duration_s = 20 + + # Batch size settings + _create_analyzer.batch_min_size = 5 + _create_analyzer.batch_max_size = 5 + + return _create_analyzer + # Added for testing to remove the dependency on the backend service from enum import Enum diff --git a/src/analytics/frontend/tests/test_frontend.py b/src/analytics/frontend/tests/test_frontend.py index b49062a8144a72aa3e7d4e59cb324b0bc4bae9f0..6532d6ee3a0f1a10a61d4f0619ba53b0bd0c3f8f 100644 --- a/src/analytics/frontend/tests/test_frontend.py +++ b/src/analytics/frontend/tests/test_frontend.py @@ -28,7 +28,7 @@ from common.tools.kafka.Variables import KafkaTopic from common.proto.analytics_frontend_pb2 import AnalyzerId, AnalyzerList from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient from analytics.frontend.service.AnalyticsFrontendService import AnalyticsFrontendService -from analytics.frontend.tests.messages import ( create_analyzer_id, create_analyzer, +from analytics.frontend.tests.messages import ( create_analyzer_for_mgon_agent, create_analyzer_id, create_analyzer, create_analyzer_filter ) from analytics.frontend.service.AnalyticsFrontendServiceServicerImpl import AnalyticsFrontendServiceServicerImpl from apscheduler.schedulers.background import BackgroundScheduler @@ -38,15 +38,13 @@ from apscheduler.triggers.interval import IntervalTrigger ########################### # Tests Setup ########################### - +LOGGER = logging.getLogger(__name__) LOCAL_HOST = '127.0.0.1' ANALYTICS_FRONTEND_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.ANALYTICS)) os.environ[get_env_var_name(ServiceNameEnum.ANALYTICS, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.ANALYTICS, 
ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(ANALYTICS_FRONTEND_PORT) -LOGGER = logging.getLogger(__name__) - @pytest.fixture(scope='session') def analyticsFrontend_service(): LOGGER.info('Initializing AnalyticsFrontendService...') @@ -93,10 +91,10 @@ def log_all_methods(request): ########################### # --- "test_validate_kafka_topics" should be executed before the functionality tests --- -def test_validate_kafka_topics(): - LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") - response = KafkaTopic.create_all_topics() - assert isinstance(response, bool) +# def test_validate_kafka_topics(): +# LOGGER.debug(" >>> test_validate_kafka_topics: START <<< ") +# response = KafkaTopic.create_all_topics() +# assert isinstance(response, bool) # To test start and stop listener together def test_StartAnalyzers(analyticsFrontend_client): @@ -133,3 +131,15 @@ def test_StartAnalyzers(analyticsFrontend_client): # for response in class_obj.StartResponseListener(analyzer_id.analyzer_id.uuid): # LOGGER.debug(response) # assert isinstance(response, tuple) + +# Additional tests can be added below following the same structure +# --- MGON Agent Tests --- +def test_StartAnalyzer_MGON_Agent(analyticsFrontend_client): + added_analyzer_id = analyticsFrontend_client.StartAnalyzer(create_analyzer_for_mgon_agent()) + LOGGER.debug(str(added_analyzer_id)) + LOGGER.info("waiting for timer to complete 60 seconds ...") + time.sleep(60) + LOGGER.info('--> StopAnalyzer after timer completion') + response = analyticsFrontend_client.StopAnalyzer(added_analyzer_id) + LOGGER.debug(str(response)) +# --- End of MGON Agent Tests --- diff --git a/src/automation/service/AutomationServiceServicerImpl.py b/src/automation/service/AutomationServiceServicerImpl.py index 5c31b11bd8c86887e354cdcdd40463cc6b5c12ef..11b4d5e6a70349718d28e0d7b6653e4fa98971ff 100644 --- a/src/automation/service/AutomationServiceServicerImpl.py +++ b/src/automation/service/AutomationServiceServicerImpl.py @@ -42,6 +42,8 @@ class
AutomationServiceServicerImpl(AutomationServiceServicer): targetService = context_client.GetService(request.target_service_id) telemetryService = context_client.GetService(request.telemetry_service_id) + LOGGER.info(f"Target service type: {targetService.service_type}") + LOGGER.info(f"Telemetry service type: {telemetryService.service_type}") handler_cls = self.get_service_handler_based_on_service_types( targetService.service_type, telemetryService.service_type, ZSM_SERVICE_HANDLERS @@ -98,10 +100,12 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): return ZSMService() def get_service_handler_based_on_service_types( - self, targetServiceType ,telemetryServiceType , ZSM_SERVICE_HANDLERS + self, targetServiceType, telemetryServiceType, ZSM_SERVICE_HANDLERS ): flag = True for handler_cls, filters in ZSM_SERVICE_HANDLERS: + LOGGER.info(f"Handler: {handler_cls}") # <<--P4INTZSMPlugin + LOGGER.info(f"Filters: {filters}") # <--ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY for filter in filters: flag = self.check_if_requested_services_pass_filter_criteria( filter, targetServiceType, telemetryServiceType @@ -115,6 +119,7 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): ): flag = True for filter_key, filter_value in filter.items(): + LOGGER.info(f"Filter value: {filter_value}") # <--ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY if filter_value in ZSM_FILTER_FIELD_ALLOWED_VALUES[filter_key.value]: if filter_key.value == ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value: if filter_value != targetServiceType: diff --git a/src/automation/service/zsm_handler_api/ZSMFilterFields.py b/src/automation/service/zsm_handler_api/ZSMFilterFields.py index 7b00de5bc474f8ac4a6a711acf9f7200723ddaae..e6c2fb8441dfd0c01d0ea28f57584ad3e711a38b 100644 --- a/src/automation/service/zsm_handler_api/ZSMFilterFields.py +++ b/src/automation/service/zsm_handler_api/ZSMFilterFields.py @@ -16,19 +16,21 @@ from enum import Enum from common.proto.context_pb2 import 
ServiceTypeEnum class ZSMFilterFieldEnum(Enum): - TARGET_SERVICE_TYPE = 'target_service_type' + TARGET_SERVICE_TYPE = 'target_service_type' TELEMETRY_SERVICE_TYPE = 'telemetry_service_type' TARGET_SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_L2NM + ServiceTypeEnum.SERVICETYPE_L2NM, + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, } TELEMETRY_SERVICE_TYPE_VALUES = { - ServiceTypeEnum.SERVICETYPE_INT + ServiceTypeEnum.SERVICETYPE_INT, + ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, } # Maps filter fields to allowed values per Filter field. # If no restriction (free text) None is specified ZSM_FILTER_FIELD_ALLOWED_VALUES = { - ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value : TARGET_SERVICE_TYPE_VALUES, + ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value : TARGET_SERVICE_TYPE_VALUES, ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE.value : TELEMETRY_SERVICE_TYPE_VALUES, } diff --git a/src/automation/service/zsm_handlers/OpticalZSMPlugin.py b/src/automation/service/zsm_handlers/OpticalZSMPlugin.py new file mode 100644 index 0000000000000000000000000000000000000000..6baacfde6d14bdd7c090237d8dca76944ec23e69 --- /dev/null +++ b/src/automation/service/zsm_handlers/OpticalZSMPlugin.py @@ -0,0 +1,104 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import grpc, logging +from uuid import uuid4 +from common.proto.analytics_frontend_pb2 import AnalyzerId +from common.proto.policy_pb2 import PolicyRuleState +from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService + +from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient +from automation.client.PolicyClient import PolicyClient +from context.client.ContextClient import ContextClient +from automation.service.zsm_handler_api._ZSMHandler import _ZSMHandler + + +LOGGER = logging.getLogger(__name__) + + +class OpticalZSMPlugin(_ZSMHandler): + def __init__(self): + LOGGER.info('Init OpticalZSMPlugin') + + def zsmCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext): # type: ignore + context_client = ContextClient() + policy_client = PolicyClient() + analytics_frontend_client = AnalyticsFrontendClient() + + # Verify the input target service ID + try: + target_service_id = context_client.GetService(request.target_service_id) + except grpc.RpcError as ex: + LOGGER.exception(f'Unable to get target service:\n{str(request.target_service_id)}') + if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + context_client.close() + return self._zsm_create_response_empty() + + # Verify the input telemetry service ID + try: + telemetry_service_id = context_client.GetService(request.telemetry_service_id) + except grpc.RpcError as ex: + LOGGER.exception(f'Unable to get telemetry service:\n{str(request.telemetry_service_id)}') + if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + context_client.close() + return self._zsm_create_response_empty() + + # Start an analyzer + try: + analyzer_id: AnalyzerId = analytics_frontend_client.StartAnalyzer(request.analyzer) # type: ignore + LOGGER.info('Analyzer_id:\n{:s}'.format(str(analyzer_id))) + except grpc.RpcError as ex: + LOGGER.exception(f'Unable to start Analyzer:\n{str(request.analyzer)}') + if ex.code() != grpc.StatusCode.NOT_FOUND: raise # 
pylint: disable=no-member + context_client.close() + analytics_frontend_client.close() + return self._zsm_create_response_empty() + + # Create a policy + try: + LOGGER.info(f'Policy:\n{str(request.policy)}') + policy_rule_state: PolicyRuleState = policy_client.PolicyAddService(request.policy) # type: ignore + LOGGER.info(f'Policy rule state:\n{policy_rule_state}') + except Exception as ex: + LOGGER.exception(f'Unable to create policy:\n{str(request.policy)}') + LOGGER.exception(ex.code()) + # ToDo: Investigate why PolicyAddService throws exception + # if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member + # context_client.close() + # policy_client.close() + # return self._zsm_create_response_empty() + + context_client.close() + analytics_frontend_client.close() + policy_client.close() + return self._zsm_create_response(request) + + def zsmDelete(self): + LOGGER.info('zsmDelete method') + + def zsmGetById(self): + LOGGER.info('zsmGetById method') + + def zsmGetByService(self): + LOGGER.info('zsmGetByService method') + + def _zsm_create_response(self, request): + response = ZSMService() + automation_id = str(uuid4()) + response.zsmServiceId.uuid.uuid = automation_id + response.serviceId.service_uuid.uuid = request.target_service_id.service_uuid.uuid + return response + + def _zsm_create_response_empty(self): + return ZSMService() diff --git a/src/automation/service/zsm_handlers/__init__.py b/src/automation/service/zsm_handlers/__init__.py index dcb533e614cc480d70f562f1d790a00d66e518e6..79e703ba8e860e13f43cb7f207b11fd5f26b38ba 100644 --- a/src/automation/service/zsm_handlers/__init__.py +++ b/src/automation/service/zsm_handlers/__init__.py @@ -15,6 +15,7 @@ from common.proto.context_pb2 import ServiceTypeEnum from ..zsm_handler_api.ZSMFilterFields import ZSMFilterFieldEnum from automation.service.zsm_handlers.P4INTZSMPlugin import P4INTZSMPlugin +from automation.service.zsm_handlers.OpticalZSMPlugin import OpticalZSMPlugin ZSM_SERVICE_HANDLERS 
= [ (P4INTZSMPlugin, [ @@ -22,5 +23,11 @@ ZSM_SERVICE_HANDLERS = [ ZSMFilterFieldEnum.TARGET_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM, ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT, } + ]), + (OpticalZSMPlugin, [ + { + ZSMFilterFieldEnum.TARGET_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, + ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY, + } ]) ] diff --git a/src/device/service/drivers/oc_driver/OCDriver.py b/src/device/service/drivers/oc_driver/OCDriver.py index f7c093ba35e7039e4df120dfd178a58579bc10ba..e1757b4eff7570b3be93d976eea0aa11b2076df4 100644 --- a/src/device/service/drivers/oc_driver/OCDriver.py +++ b/src/device/service/drivers/oc_driver/OCDriver.py @@ -180,7 +180,7 @@ def edit_config( str_config_messages=delete_optical_band(resources) else : str_config_messages=disable_media_channel(resources) - + logger.info(f"messages ,{str_config_messages} ") for str_config_message in str_config_messages: # configuration of the received templates if str_config_message is None: raise UnsupportedResourceKeyException("CONFIG") diff --git a/src/opticalcontroller/OpticalController.py b/src/opticalcontroller/OpticalController.py index 3e7cc28c5099553d56afa840acc7caf43874cede..90c2c246cd8d873dd6f637ab3f220eef25532e5d 100644 --- a/src/opticalcontroller/OpticalController.py +++ b/src/opticalcontroller/OpticalController.py @@ -69,24 +69,26 @@ class AddLightpath(Resource): #@optical.route('/AddFlexLightpath///') -@optical.route('/AddFlexLightpath///', - defaults={"bidir": 1, "band": None}) -@optical.route('/AddFlexLightpath////', - defaults={"band": None}) -@optical.route('/AddFlexLightpath/////',) +@optical.route('/AddFlexLightpath////', + defaults={"bidir": 1, "band": None, "obx_idx": None}) +@optical.route('/AddFlexLightpath/////', + defaults={"band": None, "obx_idx": None}) +@optical.route('/AddFlexLightpath//////', + defaults={"obx_idx": None}) 
+@optical.route('/AddFlexLightpath///////') @optical.response(200, 'Success') @optical.response(404, 'Error, not found') class AddFlexLightpath(Resource): @staticmethod - def put(src, dst, bitrate, bidir=1, band=None): + def put(src, dst, bitrate, pref, bidir=0, band=None, obx_idx = None ): - print("INFO: New FlexLightpath request from {} to {} with rate {} ".format(src, dst, bitrate)) + print("INFO: New MGON request from {} to {} with rate {} and band {}".format(src, dst, bitrate, band)) t0 = time.time()*1000.0 - if debug: - rsa.g.printGraph() + #if debug: + # rsa.g.printGraph() if rsa is not None: - flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band) + flow_id, optical_band_id = rsa.rsa_fs_computation(src, dst, bitrate, bidir, band, obx_idx, pref) if flow_id is not None: if rsa.db_flows[flow_id]["op-mode"] == 0: return 'No path found', 404 @@ -106,14 +108,40 @@ class AddFlexLightpath(Resource): return rsa.optical_bands[optical_band_id], 200 else: return "Error", 404 + +@optical.route('/AddAlienFLexLightpath//////', + defaults={"bidir": 0}) +@optical.route('/AddAlienFLexLightpath///////') +@optical.response(200, 'Success') +@optical.response(404, 'Error, not found') +class AddAlienFLexLightpath(Resource): + @staticmethod + def put(src, s_port, dst, d_port, band, obx_idx, bidir=0): + + print("INFO: New Alien MGON request from {} to {} with band {}".format(src, dst, band)) + t0 = time.time()*1000.0 + #if debug: + # rsa.g.printGraph() + + if rsa is not None: + flow_id = rsa.rsa_fs_alien_computation(src, s_port, dst, d_port, band, bidir, obx_idx) + if flow_id is not None: + if not rsa.db_flows[flow_id]["is_active"]: + return 'No path found', 404 + t1 = time.time() * 1000.0 + elapsed = t1 - t0 + print("INFO: time elapsed = {} ms".format(elapsed)) + return rsa.db_flows[flow_id], 200 + + # @optical.route('/DelFlexLightpath////') -@optical.route('/DelFlexLightpath////') -@optical.route('/DelFlexLightpath/////') 
+@optical.route('/DelFlexLightpath///') +@optical.route('/DelFlexLightpath////') @optical.response(200, 'Success') @optical.response(404, 'Error, not found') class DelFLightpath(Resource): @staticmethod - def delete( src, dst, bitrate, o_band_id=None, flow_id=None): + def delete( src, dst, flow_id, o_band_id=None): flow = None match1=False ob_id=None @@ -121,13 +149,13 @@ class DelFLightpath(Resource): if flow_id in rsa.db_flows.keys(): flow = rsa.db_flows[flow_id] - match1 = flow["src"] == src and flow["dst"] == dst and flow["bitrate"] == bitrate + match1 = flow["src"] == src and flow["dst"] == dst ob_id = flow["parent_opt_band"] if 'parent_opt_band' in flow else None flow['is_active']=False if flow is not None: bidir = flow["bidir"] if bidir: - match2 = flow["src"] == dst and flow["dst"] == src and flow["bitrate"] == bitrate + match2 = flow["src"] == dst and flow["dst"] == src if match1 or match2: ob_id = flow["parent_opt_band"] if 'parent_opt_band' in flow else None rsa.db_flows[flow_id]["is_active"] = False @@ -330,6 +358,28 @@ class GetBand(Resource): return rsa.optical_bands[ob_idx], 200 return {}, 404 +@optical.route('/ReconfigFlexLightpath/') +@optical.response(200, 'Success') +@optical.response(404, 'Error, not found') +class ReconfigFlexLightpath(Resource): + @staticmethod + def put(flow_id_val): + print("INFO: Reconfiguring optical {}".format(flow_id_val)) + t0 = time.time()*1000.0 + if rsa is not None: + flow_idx, optical_band_id = rsa.rsa_fs_recomputation(flow_id_val) + if flow_idx is not None: + if rsa.db_flows[flow_idx]["op-mode"] == 0: + return 'No path found', 404 + t1 = time.time() * 1000.0 + elapsed = t1 - t0 + print("INFO: time elapsed = {} ms".format(elapsed)) + print(flow_idx, optical_band_id) + return rsa.db_flows[flow_idx], 200 + else: + return "Error", 404 + else: + return "Error", 404 @optical.route('/GetLinks') @optical.response(200, 'Success') diff --git a/src/opticalcontroller/RSA.py b/src/opticalcontroller/RSA.py index 
8c445fe7202928e7878f054205fb5841face76ed..dc008725bdd2271551c4fda17c6c2275a87686a5 100644 --- a/src/opticalcontroller/RSA.py +++ b/src/opticalcontroller/RSA.py @@ -17,10 +17,12 @@ from opticalcontroller.dijkstra import * from opticalcontroller.tools import * from opticalcontroller.variables import * +''' LOGGER = logging.getLogger(__name__) def print(*args) -> None: LOGGER.info(' '.join([str(a) for a in args])) +''' class RSA(): def __init__(self, nodes, links): @@ -66,18 +68,19 @@ class RSA(): def init_link_slots2(self): if full_links: + print("2026 initialize full spectrum") for l in self.links_dict["optical_links"]: fib = l["optical_details"] #fib = self.links_dict[l]["fibers"][f] if len(fib["c_slots"]) > 0: for c in range(0, Nc): - fib["c_slots"][c] = 1 + fib["c_slots"][str(c)] = 1 if len(fib["l_slots"]) > 0: for c in range(0, Nl): - fib["l_slots"][c] = 1 + fib["l_slots"][str(c)] = 1 if len(fib["s_slots"]) > 0: for c in range(0, Ns): - fib["s_slots"][c] = 1 + fib["s_slots"][str(c)] = 1 if debug: print(fib) for l1 in self.links_dict["optical_links"]: @@ -143,7 +146,31 @@ class RSA(): self.g.reset_graph() return links, path - def get_slots(self, links, slots, optical_band_id=None): + def compute_disjoint_path(self, src, dst, path1=None): + if path1 == None: + path1 = shortest_path(self.g, self.g.get_vertex(src), self.g.get_vertex(dst)) + path = disjoint_path(self.g, src, dst, path1, False) + print("INFO: Path from {} to {} with distance: {}".format(src, dst, self.g.get_vertex(dst).get_distance())) + if debug: + print(path) + links = [] + for i in range(0, len(path) - 1): + s = path[i] + if debug: + print(s) + if i < len(path) - 1: + d = path[i + 1] + link_id = "{}-{}".format(s, d) + if debug: + #print(link_id, self.links_dict[link_id]) + print(link_id, self.get_link_by_name(link_id)) + + links.append(link_id) + self.g.reset_graph() + return links, path + + + def get_slots(self, links, slots, optical_band_id=None, old_band_x=None): if isinstance(slots, int): val_c 
= slots @@ -240,6 +267,7 @@ class RSA(): l_slots[l] = combine(l_slots[l], consecutives(fib["l_slots"], val_l)) l_found = 1''' if optical_band_id is not None: + print(f"NEW_DISJOINT: {self.optical_bands[optical_band_id]}") if "c_slots" in self.optical_bands[optical_band_id].keys(): if len(self.optical_bands[optical_band_id]["c_slots"]) > 0: a_c = c_sts @@ -252,18 +280,27 @@ class RSA(): if "l_slots" in self.optical_bands[optical_band_id].keys(): if len(self.optical_bands[optical_band_id]["l_slots"]) > 0: a_l = l_sts - b_l = consecutives(self.optical_bands[optical_band_id]["l_slots"], val_c) + b_l = consecutives(self.optical_bands[optical_band_id]["l_slots"], val_l) l_sts = common_slots(a_l, b_l) else: l_sts = [] if "s_slots" in self.optical_bands[optical_band_id].keys(): if len(self.optical_bands[optical_band_id]["s_slots"]) > 0: a_s = s_sts - b_s = consecutives(str_list_to_int(self.optical_bands[optical_band_id]["s_slots"].keys()), val_c) + b_s = consecutives(self.optical_bands[optical_band_id]["s_slots"], val_s) s_sts = common_slots(a_s, b_s) else: s_sts = [] - + if old_band_x == "c_slots": + l_sts = [] + s_sts = [] + if old_band_x == "l_slots": + c_sts = [] + l_sts = [] + if old_band_x == "s_slots": + c_sts = [] + a_sts = [] + return c_sts, l_sts, s_sts def update_link(self, fib, slots, band): @@ -330,7 +367,7 @@ class RSA(): #update_optical_band(optical_bands=self.optical_bands,optical_band_id=optical_band_id,band=band,link=link) - def del_flow(self, flow,flow_id, o_b_id = None): + def del_flow(self, flow, flow_id, o_b_id = None): flows = flow["flows"] band = flow["band_type"] slots = flow["slots"] @@ -438,9 +475,9 @@ class RSA(): print(f"del_flow_fib {fib } and band {band}") print(f"del_flow { str_list_to_int(fib[band].keys())}") - print(f"invoking restore_link_2 fib: {fib} , slots {slots} , band {band} ") - #self.restore_link(fib, slots, band) - self.restore_link_2(fib, slots, band, link = link) + print(f"invoking restore_link fib: {fib} , slots {slots} , band 
{band} ") + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link = link) self.optical_bands[o_b_id]["is_active"]=False @@ -460,7 +497,8 @@ class RSA(): fib = rlink["optical_details"] #fib = self.get_link_by_name(r_l)["optical_details"] if list_in_list(slots, str_list_to_int(fib[band].keys())): - self.restore_link_2(fib, slots, band, link=rlink) + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link=rlink) if debug: print(fib[band]) #changed according to TFS development @@ -476,8 +514,7 @@ class RSA(): print(f"delete band del_band") self.del_band(flow,flow_id,o_b_id=o_b_id) else : - self.del_flow(flow,flow_id=flow_id,o_b_id=o_b_id) - + self.del_flow(flow,flow_id=flow_id,o_b_id=o_b_id) def get_fibers_forward(self, links, slots, band): @@ -571,10 +608,10 @@ class RSA(): return fiber_list #function invoked for lightpaths and OB - def select_slots_and_ports(self, links, n_slots, c, l, s, bidir): + def select_slots_and_ports(self, links, n_slots, c, l, s, bidir, preferred=None): if debug: print (links, n_slots, c, l, s, bidir, self.c_slot_number, self.l_slot_number, self.s_slot_number) - band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number) + band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number, preferred) if debug: print (band, slots) if band is None: @@ -648,16 +685,18 @@ class RSA(): print(self.links_dict) band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number) if band is None: - print("No slots available in the three bands") + print("ERROR: No slots available in the three bands") return None, None, None, None, None if debug: - print(band, slots) + print(f"INFO: XXXX {band}, {slots}") + self.get_fibers_forward(links, slots, band) if bidir: self.get_fibers_backward(links, slots, band) #fibers_f = self.get_fibers_forward(links, slots, band) 
self.update_optical_band(o_band_id, slots, band) + print("INFO: 1") #fibers_b = [] #if bidir: # fibers_b = self.get_fibers_backward(links, fibers_f, slots, band) @@ -686,7 +725,8 @@ class RSA(): #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"] r_inport = lx["local_peer_port"] t_flows[src]["b"] = {"in": r_inport, "out": port_0} - + print("INFO: 2") + #R1 rules t_flows[dst] = {} t_flows[dst]["f"] = {} @@ -733,13 +773,86 @@ class RSA(): #r_inport = self.links_dict[drop]['fibers'][f]["remote_peer_port"] r_inport = ly["remote_peer_port"] t_flows[dst]["b"] = {"in": port_0, "out": r_inport} + print("INFO: 3") + + #if debug: + # print(self.links_dict) + #if debug: + # print(t_flows) + print("INFO: Flow matrix computed for Flex Lightpath") - if debug: - print(self.links_dict) + return t_flows, band, slots, {}, {} - if debug: - print(t_flows) - print("INFO: Flow matrix computed for Flex Lightpath") + #function ivoked for fs lightpaths only + def alien_select_slots_and_ports_fs(self, src_port, dst_port, n_slots, c, l, s, bidir, o_band_id): + print("PDP: inside flow creation") + band, slots = slot_selection(c, l, s, n_slots, self.c_slot_number, self.l_slot_number, self.s_slot_number) + if band is None: + print("PDP ERROR: No slots available in the three bands") + return None, None, None, None, None + print(f"PDP: {band}, {slots}") + #if debug: + # print(f"INFO: XXXX {band}, {slots}") + + self.update_optical_band(o_band_id, slots, band) + print("INFO: 1") + + t_flows = {} + + ''' + #flows_add_side + src, dst = add.split("-") + lx = self.get_link_by_name(add)["optical_details"] + #outport = self.links_dict[add]['fibers'][f]["src_port"] + outport = lx["src_port"] + #T1 rules + t_flows[src] = {} + t_flows[src]["f"] = {} + t_flows[src]["b"] = {} + t_flows[src]["f"] = {"in": port_0, "out": outport} + if bidir: + #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"] + r_inport = lx["local_peer_port"] + t_flows[src]["b"] = {"in": r_inport, "out": port_0} + 
print("INFO: 2") + ''' + src = self.optical_bands[o_band_id]["src"] + dst = self.optical_bands[o_band_id]["dst"] + #R1 rules + t_flows[src] = {} + t_flows[src]["f"] = {} + t_flows[src]["b"] = {} + opt_band_src_port = self.optical_bands[o_band_id]["src_port"] + t_flows[src]["f"] = {"in": src_port, "out": opt_band_src_port} + #to modify to peer ports + ''' + if bidir: + #r_inport = self.links_dict[add]['fibers'][f]["local_peer_port"] + r_inport = lx["local_peer_port"] + t_flows[src]["b"] = {"in": r_inport, "out": port_0} + if bidir: + rev_opt_band_dst_port = self.optical_bands[o_band_id]["rev_dst_port"] + #r_outport = self.links_dict[add]['fibers'][f]["remote_peer_port"] + r_outport = lx["remote_peer_port"] + t_flows[dst]["b"] = {"in": rev_opt_band_dst_port, "out": r_outport} + ''' + + #flows_drop_side + # R2 rules + + t_flows[dst] = {} + t_flows[dst]["f"] = {} + t_flows[dst]["b"] = {} + opt_band_dst_port = self.optical_bands[o_band_id]["dst_port"] + t_flows[dst]["f"] = {"in": opt_band_dst_port, "out": dst_port} + ''' + if bidir: + rev_opt_band_src_port = self.optical_bands[o_band_id]["rev_src_port"] + #r_inport = self.links_dict[drop]['fibers'][f]["local_peer_port"] + r_inport = ly["local_peer_port"] + t_flows[src]["b"] = {"in": r_inport, "out": rev_opt_band_src_port} + ''' + print("PDP: Flow matrix computed for Alien Flex Lightpath") return t_flows, band, slots, {}, {} @@ -758,17 +871,20 @@ class RSA(): self.db_flows[self.flow_id]["bitrate"] = rate self.db_flows[self.flow_id]["bidir"] = bidir + #@Chafy links, path = self.compute_path(src, dst) if len(path) < 1: self.null_values(self.flow_id) return self.flow_id op, num_slots = map_rate_to_slot(rate) + #@Chafy c_slots, l_slots, s_slots = self.get_slots(links, num_slots) if debug: print(c_slots) print(l_slots) print(s_slots) + #@Chafy if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0: flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, 
bidir) @@ -828,10 +944,10 @@ class RSA(): self.optical_bands[ob_id]["s_slots"] = [] self.optical_bands[ob_id]["served_lightpaths"] = [] self.optical_bands[ob_id]["reverse_optical_band_id"] = 0 - self.db_flows[self.flow_id]["parent_opt_band"] = 0 - self.db_flows[self.flow_id]["new_optical_band"] = 0 + #self.db_flows[flow_id]["parent_opt_band"] = 0 + #self.db_flows[flow_id]["new_optical_band"] = 0 - def create_optical_band(self, links, path, bidir, num_slots): + def create_optical_band(self, links, path, bidir, num_slots, old_band_x=None, preferred=None): print("INFO: Creating optical-band of {} slots".format(num_slots)) if self.opt_band_id == 0: self.opt_band_id += 1 @@ -880,14 +996,13 @@ class RSA(): if bidir: self.optical_bands[back_opt_band_id]["src"] = path[-1] ''' - - c_slots, l_slots, s_slots = self.get_slots(links, num_slots) + c_slots, l_slots, s_slots = self.get_slots(links, num_slots, optical_band_id=None, old_band_x=old_band_x) if debug: print(c_slots) print(l_slots) print(s_slots) if len(c_slots) > 0 or len(l_slots) > 0 or len(s_slots) > 0: - flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, bidir) + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports(links, num_slots, c_slots, l_slots, s_slots, bidir, preferred) if debug: print(flow_list, band_range, slots, fiber_f, fiber_b) f0, band = frequency_converter(band_range, slots) @@ -895,7 +1010,7 @@ class RSA(): print(f0, band) print("INFO: RSA completed for optical band") if flow_list is None: - self.null_values(self.flow_id) + self.null_values_ob(self.opt_band_id) return self.flow_id, [] #slots_i = [] #for i in slots: @@ -983,18 +1098,130 @@ class RSA(): result.append(ob_id) return result - def rsa_fs_computation(self, src, dst, rate, bidir, band): - num_slots_ob = "full_band" + def get_alien_slots(self, optical_band_id, num_slots): + print(f"ALIEN SLOTS: {self.optical_bands[optical_band_id]}") + if "c_slots" 
in self.optical_bands[optical_band_id].keys(): + if len(self.optical_bands[optical_band_id]["c_slots"]) > 0: + #a_c = c_sts + #MOD + c_sts = consecutives(self.optical_bands[optical_band_id]["c_slots"], num_slots) + #c_sts = common_slots(a_c, b_c) + else: + c_sts = [] + if "l_slots" in self.optical_bands[optical_band_id].keys(): + if len(self.optical_bands[optical_band_id]["l_slots"]) > 0: + #a_l = l_sts + l_sts = consecutives(self.optical_bands[optical_band_id]["l_slots"], num_slots) + #l_sts = common_slots(a_l, b_l) + else: + l_sts = [] + if "s_slots" in self.optical_bands[optical_band_id].keys(): + if len(self.optical_bands[optical_band_id]["s_slots"]) > 0: + #a_s = s_sts + s_sts = consecutives(self.optical_bands[optical_band_id]["s_slots"], num_slots) + #s_sts = common_slots(a_s, b_s) + else: + s_sts = [] + return c_sts, l_sts, s_sts + + + def rsa_fs_alien_computation(self, src, s_port, dst, d_port, band, bidir, obx_idx): + if self.flow_id == 0: + self.flow_id += 1 + else: + if (self.db_flows[self.flow_id]["bidir"] == 1): + self.flow_id += 2 + else: + self.flow_id += 1 + if self.nodes_dict[src]["type"] == "OC-ROADM" and self.nodes_dict[dst]["type"] == "OC-ROADM": + if obx_idx in self.optical_bands.keys(): + #optical_band = self.optical_bands[obx_idx] + num_slots = map_band_to_slot(band) + self.db_flows[self.flow_id] = {} + self.db_flows[self.flow_id]["flow_id"] = self.flow_id + self.db_flows[self.flow_id]["src"] = src + self.db_flows[self.flow_id]["dst"] = dst + self.db_flows[self.flow_id]["bitrate"] = None + self.db_flows[self.flow_id]["bidir"] = bidir + self.db_flows[self.flow_id]["src_port"] = s_port + self.db_flows[self.flow_id]["dst_port"] = d_port + + c_slots, l_slots, s_slots = self.get_alien_slots(obx_idx, num_slots) + if debug: + print(f"PDP: {c_slots}") + print(f"PDP: {l_slots}") + print(f"PDP: {s_slots}") + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = 
self.alien_select_slots_and_ports_fs(s_port, d_port, num_slots, + c_slots, l_slots, s_slots, bidir, + obx_idx) + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Alien Flex Lightpath with OB already in place") + if flow_list is None: + self.null_values(self.flow_id) + return self.flow_id + slots_i = [] + for i in slots: + slots_i.append(int(i)) + # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band + # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band + self.db_flows[self.flow_id]["flows"] = flow_list + self.db_flows[self.flow_id]["band_type"] = band_range + self.db_flows[self.flow_id]["slots"] = slots_i + self.db_flows[self.flow_id]["fiber_forward"] = fiber_f + self.db_flows[self.flow_id]["fiber_backward"] = fiber_b + self.db_flows[self.flow_id]["op-mode"] = None + self.db_flows[self.flow_id]["n_slots"] = num_slots + self.db_flows[self.flow_id]["links"] = [] + self.db_flows[self.flow_id]["path"] = [] + self.db_flows[self.flow_id]["band"] = band + self.db_flows[self.flow_id]["freq"] = f0 + self.db_flows[self.flow_id]["is_active"] = True + self.db_flows[self.flow_id]["parent_opt_band"] = obx_idx + self.db_flows[self.flow_id]["new_optical_band"] = 0 + self.optical_bands[obx_idx]["served_lightpaths"].append(self.flow_id) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + return self.flow_id + else: + self.null_values(self.flow_id) + return self.flow_id + + else: + print("error") + self.null_values(self.flow_id) + return self.flow_id + + def rsa_fs_computation(self, src, dst, rate, bidir, band, bandx_id, preferred=None): if band is not None: num_slots_ob = map_band_to_slot(band) print(band, num_slots_ob) + else: + num_slots_ob = "full_band" if self.nodes_dict[src]["type"] == "OC-ROADM" and self.nodes_dict[dst]["type"] == "OC-ROADM": 
print("INFO: ROADM to ROADM connection") - links, path = self.compute_path(src, dst) + old_band_x = None + if bandx_id != None: + if bandx_id in self.optical_bands.keys(): + path_x = self.optical_bands[bandx_id]["path"] + old_band_x = self.optical_bands[bandx_id]["band_type"] + links, path = self.compute_disjoint_path(src, dst, path_x) + else: + links, path = self.compute_disjoint_path(src, dst, None) + if len(path) < 1: + print("INFO: no disjoint path found, installing in the shortest path") + links, path = self.compute_path(src, dst) + else: + links, path = self.compute_path(src, dst) if len(path) < 1: self.null_values_ob(self.opt_band_id) - return self.flow_id, [] - optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob) + return self.opt_band_id, [] + optical_band_id, temp_links = self.create_optical_band(links, path, bidir, num_slots_ob, old_band_x, preferred) return None, optical_band_id print("INFO: TP to TP connection") if self.flow_id == 0: @@ -1004,6 +1231,9 @@ class RSA(): self.flow_id += 2 else: self.flow_id += 1 + if band is not None: + num_slots_ob = map_band_to_slot(band) + print(band, num_slots_ob) self.db_flows[self.flow_id] = {} self.db_flows[self.flow_id]["flow_id"] = self.flow_id self.db_flows[self.flow_id]["src"] = src @@ -1031,9 +1261,121 @@ class RSA(): existing_ob = self.get_optical_bands(roadm_src, roadm_dst) if len(existing_ob) > 0: + #first checking if provided band id is passed + if preferred is not None: + ob_id = int(preferred) + if "is_active" in self.optical_bands[ob_id].keys(): + is_active = self.optical_bands[ob_id]["is_active"] + if is_active: + op, num_slots = map_rate_to_slot(rate) + if debug: + print(temp_links2) + c_slots, l_slots, s_slots = self.get_slots(temp_links2, num_slots, ob_id) + if debug: + print(c_slots) + print(l_slots) + print(s_slots) + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = 
self.select_slots_and_ports_fs(temp_links2, num_slots, + c_slots, + l_slots, s_slots, bidir, + ob_id) + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Flex Lightpath with OB already in place") + if flow_list is not None: + slots_i = [] + for i in slots: + slots_i.append(int(i)) + # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band + # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band + self.db_flows[self.flow_id]["flows"] = flow_list + self.db_flows[self.flow_id]["band_type"] = band_range + self.db_flows[self.flow_id]["slots"] = slots_i + self.db_flows[self.flow_id]["fiber_forward"] = fiber_f + self.db_flows[self.flow_id]["fiber_backward"] = fiber_b + self.db_flows[self.flow_id]["op-mode"] = op + self.db_flows[self.flow_id]["n_slots"] = num_slots + self.db_flows[self.flow_id]["links"] = temp_links2 + self.db_flows[self.flow_id]["path"] = temp_path + self.db_flows[self.flow_id]["band"] = band + self.db_flows[self.flow_id]["freq"] = f0 + self.db_flows[self.flow_id]["is_active"] = True + self.db_flows[self.flow_id]["parent_opt_band"] = ob_id + self.db_flows[self.flow_id]["new_optical_band"] = 0 + self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + return self.flow_id, ob_id + else: + print("not enough slots") + print("trying to extend OB {}".format(ob_id)) + new_slots = self.extend_optical_band(ob_id, band=None) + if len(new_slots) > 0: + band_type = self.optical_bands[ob_id]["band_type"] + c_slots = [] + l_slots = [] + s_slots = [] + if band_type == "c_slots": + c_slots = new_slots + elif band_type == "l_slots": + l_slots = new_slots + else: + s_slots = new_slots + op, num_slots = map_rate_to_slot(rate) + if debug: + print(temp_links2) + c_slots, l_slots, s_slots = 
self.get_slots(temp_links2, num_slots, ob_id) + if debug: + print(c_slots) + print(l_slots) + print(s_slots) + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs( + temp_links2, num_slots, + c_slots, + l_slots, s_slots, bidir, + ob_id) + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Flex Lightpath with OB already in place") + if flow_list is not None: + slots_i = [] + for i in slots: + slots_i.append(int(i)) + # return links, path, flow_list, band_range, slots, fiber_f, fiber_b, op, num_slots, f0, band + # links, path, flows, bx, slots, fiber_f, fiber_b, op, n_slots, f0, band + self.db_flows[self.flow_id]["flows"] = flow_list + self.db_flows[self.flow_id]["band_type"] = band_range + self.db_flows[self.flow_id]["slots"] = slots_i + self.db_flows[self.flow_id]["fiber_forward"] = fiber_f + self.db_flows[self.flow_id]["fiber_backward"] = fiber_b + self.db_flows[self.flow_id]["op-mode"] = op + self.db_flows[self.flow_id]["n_slots"] = num_slots + self.db_flows[self.flow_id]["links"] = temp_links2 + self.db_flows[self.flow_id]["path"] = temp_path + self.db_flows[self.flow_id]["band"] = band + self.db_flows[self.flow_id]["freq"] = f0 + self.db_flows[self.flow_id]["is_active"] = True + self.db_flows[self.flow_id]["parent_opt_band"] = ob_id + #self.db_flows[flow_id]["new_optical_band"] = 1 + self.db_flows[self.flow_id]["new_optical_band"] = 2 + self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + return self.flow_id, ob_id + else: + print("it is not possible to allocate connection in extended OB {}".format(ob_id)) + #checking other existing OB print("INFO: Evaluating existing OB {}".format(existing_ob)) - #first 
checking in existing OB - ob_found = 0 for ob_id in existing_ob: if "is_active" in self.optical_bands[ob_id].keys(): is_active = self.optical_bands[ob_id]["is_active"] @@ -1082,7 +1424,7 @@ class RSA(): ''' if bidir: rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] - self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id) + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) ''' return self.flow_id, ob_id else: @@ -1143,13 +1485,13 @@ class RSA(): self.db_flows[self.flow_id]["freq"] = f0 self.db_flows[self.flow_id]["is_active"] = True self.db_flows[self.flow_id]["parent_opt_band"] = ob_id - #self.db_flows[self.flow_id]["new_optical_band"] = 1 + #self.db_flows[flow_id]["new_optical_band"] = 1 self.db_flows[self.flow_id]["new_optical_band"] = 2 self.optical_bands[ob_id]["served_lightpaths"].append(self.flow_id) ''' if bidir: rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] - self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id) + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) ''' return self.flow_id, ob_id else: @@ -1203,11 +1545,253 @@ class RSA(): ''' if bidir: rev_ob_id = self.optical_bands[optical_band_id]["reverse_optical_band_id"] - self.optical_bands[rev_ob_id]["served_lightpaths"].append(self.flow_id) + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) ''' return self.flow_id, optical_band_id + def move_flow(self, flow_id, slots, band, links, bidir, o_b_id = None): + for l in links: + link = self.get_link_by_name(l) + fib = link["optical_details"] + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link=link) + if o_b_id is not None: + if debug: + print("restoring OB") + print(f"invoking restore_optical_band o_b_id: {o_b_id} , slots {slots} , band {band} ") + self.restore_optical_band(o_b_id, slots, band) + if flow_id in self.optical_bands[o_b_id]["served_lightpaths"]: + if flow_id in 
self.optical_bands[o_b_id]["served_lightpaths"]: + self.optical_bands[o_b_id]["served_lightpaths"].remove(flow_id) + + #self.restore_optical_band_2(o_b_id, slots, band,links) + if bidir: + for l in links: + r_l = reverse_link(l) + if debug: + print(r_l) + rlink = self.get_link_by_name(r_l) + fib = rlink["optical_details"] + #fib = self.get_link_by_name(r_l)["optical_details"] + if list_in_list(slots, str_list_to_int(fib[band].keys())): + self.restore_link(fib, slots, band) + #self.restore_link_2(fib, slots, band, link=rlink) + if debug: + print(fib[band]) + + return True + + def rsa_fs_recomputation(self, flow_idy): + flow_idx = int(flow_idy) + print(f"INFO: Reconfiguring connection {flow_idx}") + if flow_idx not in self.db_flows.keys(): + print(f"ERROR: key not present {flow_idx}") + return None, 0 + #self.db_flows[flow_idx] = {} + op = self.db_flows[flow_idx]["op-mode"] + if op is None: + return self.alien_reconfig(flow_idx) + src = self.db_flows[flow_idx]["src"] + dst = self.db_flows[flow_idx]["dst"] + rate = self.db_flows[flow_idx]["bitrate"] + bidir = self.db_flows[flow_idx]["bidir"] + flow_list = self.db_flows[flow_idx]["flows"] + band_type = self.db_flows[flow_idx]["band_type"] + slots_init = self.db_flows[flow_idx]["slots"] + fiber_f = self.db_flows[flow_idx]["fiber_forward"] + fiber_b = self.db_flows[flow_idx]["fiber_backward"] + num_slots = self.db_flows[flow_idx]["n_slots"] + links = self.db_flows[flow_idx]["links"] + path = self.db_flows[flow_idx]["path"] + band = self.db_flows[flow_idx]["band"] + f0 = self.db_flows[flow_idx]["freq"] + ob_idx = self.db_flows[flow_idx]["parent_opt_band"] + r1 = "" + r2 = "" + if len(links) == 2: + [t1, r1] = links[0].split("-") + [r2, t2] = links[1].split("-") + else: + return 0, 0 + existing_ob = self.get_optical_bands(r1, r2) + if len(existing_ob) > 0: + print("INFO: Trying to move connection to an existing OB") + #first checking in existing OB + for ob_id in existing_ob: + if ob_id == ob_idx: + continue + if 
"is_active" in self.optical_bands[ob_id].keys(): + is_active = self.optical_bands[ob_id]["is_active"] + if not is_active: + continue + op, num_slots = map_rate_to_slot(rate) + if debug: + print(links) + + c_slots, l_slots, s_slots = self.get_slots(links, num_slots, ob_id) + if debug: + print("OFC26 available slots pre") + print(c_slots) + print(l_slots) + print(s_slots) + if band_type == "c_slots": + l_slots =[] + s_slots = [] + elif band_type == "l_slots": + c_slots = [] + s_slots = [] + elif band_type == "s_slots": + c_slots = [] + l_slots =[] + if debug: + print("OFC26 available slots after reset due to band") + print(c_slots) + print(l_slots) + print(s_slots) + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.select_slots_and_ports_fs(links, num_slots, + c_slots, + l_slots, s_slots, bidir, + ob_id) + + f0, band = frequency_converter(band_range, slots) + if debug: + print(f0, band) + print("INFO: RSA completed for Flex Lightpath with OB already in place") + if flow_list is None: + continue + slots_i = [] + for i in slots: + slots_i.append(int(i)) + self.db_flows[flow_idx]["flows"] = flow_list + self.db_flows[flow_idx]["band_type"] = band_range + self.db_flows[flow_idx]["slots"] = slots_i + self.db_flows[flow_idx]["fiber_forward"] = fiber_f + self.db_flows[flow_idx]["fiber_backward"] = fiber_b + self.db_flows[flow_idx]["op-mode"] = op + self.db_flows[flow_idx]["n_slots"] = num_slots + #self.db_flows[flow_idx]["links"] = temp_links2 + #self.db_flows[flow_idx]["path"] = temp_path + self.db_flows[flow_idx]["band"] = band + self.db_flows[flow_idx]["freq"] = f0 + self.db_flows[flow_idx]["is_active"] = True + self.db_flows[flow_idx]["parent_opt_band"] = ob_id + self.db_flows[flow_idx]["new_optical_band"] = 0 + self.optical_bands[ob_id]["served_lightpaths"].append(flow_idx) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + 
self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + self.move_flow(flow_idx, slots_init, band_type, links, bidir, ob_idx) + return flow_idx, ob_id + else: + continue + print("not enough slots") + return None, 0 + + + def alien_reconfig(self, flow_idx): + print(f"INFO: Reconfiguring alien connection {flow_idx}") + if flow_idx not in self.db_flows.keys(): + print(f"ERROR: key not present {flow_idx}") + return None, 0 + op = self.db_flows[flow_idx]["op-mode"] + src = self.db_flows[flow_idx]["src"] + dst = self.db_flows[flow_idx]["dst"] + rate = self.db_flows[flow_idx]["bitrate"] + bidir = self.db_flows[flow_idx]["bidir"] + flow_list = self.db_flows[flow_idx]["flows"] + band_type = self.db_flows[flow_idx]["band_type"] + slots_init = self.db_flows[flow_idx]["slots"] + fiber_f = self.db_flows[flow_idx]["fiber_forward"] + fiber_b = self.db_flows[flow_idx]["fiber_backward"] + num_slots = self.db_flows[flow_idx]["n_slots"] + links = self.db_flows[flow_idx]["links"] + path = self.db_flows[flow_idx]["path"] + band = self.db_flows[flow_idx]["band"] + f0 = self.db_flows[flow_idx]["freq"] + ob_idx = self.db_flows[flow_idx]["parent_opt_band"] + existing_ob = self.get_optical_bands(src, dst) + if len(existing_ob) > 0: + print("INFO: Trying to move connection to an existing OB") + #first checking in existing OB + for ob_id in existing_ob: + if ob_id == ob_idx: + continue + if not band_type in self.optical_bands[ob_id].keys(): + continue + if "is_active" in self.optical_bands[ob_id].keys(): + is_active = self.optical_bands[ob_id]["is_active"] + if not is_active: + continue + c_slots, l_slots, s_slots = self.get_alien_slots(ob_id, num_slots) + if debug: + print("OFC26 available slots pre") + print(c_slots) + print(l_slots) + print(s_slots) + if band_type == "c_slots": + l_slots =[] + s_slots = [] + elif band_type == "l_slots": + c_slots = [] + s_slots = [] + elif band_type == "s_slots": + c_slots = [] + l_slots =[] + if debug: + print("OFC26 available slots 
after reset due to band") + print(c_slots) + print(l_slots) + print(s_slots) + s_port = flow_list[src]["f"]["in"] + d_port = flow_list[dst]["f"]["out"] + #{'MGON1': {'f': {'in': 'port-25-in', 'out': 'port-9-out'}, 'b': {}}, 'MGON3': {'f': {'in': 'port-1-in', 'out': 'port-25-out'}, 'b': {}}} + + if len(c_slots) >= num_slots or len(l_slots) >= num_slots or len(s_slots) >= num_slots: + flow_list, band_range, slots, fiber_f, fiber_b = self.alien_select_slots_and_ports_fs(s_port, d_port, num_slots, + c_slots, l_slots, s_slots, bidir, + ob_id) + f0, band = frequency_converter(band_range, slots) + + if debug: + print(f0, band) + print("INFO: RSA completed for alien Flex Lightpath with OB already in place") + if flow_list is None: + continue + slots_i = [] + for i in slots: + slots_i.append(int(i)) + self.db_flows[flow_idx]["flows"] = flow_list + self.db_flows[flow_idx]["band_type"] = band_range + self.db_flows[flow_idx]["slots"] = slots_i + self.db_flows[flow_idx]["fiber_forward"] = fiber_f + self.db_flows[flow_idx]["fiber_backward"] = fiber_b + #self.db_flows[flow_idx]["op-mode"] = op + self.db_flows[flow_idx]["n_slots"] = num_slots + #self.db_flows[flow_idx]["links"] = temp_links2 + #self.db_flows[flow_idx]["path"] = temp_path + self.db_flows[flow_idx]["band"] = band + self.db_flows[flow_idx]["freq"] = f0 + self.db_flows[flow_idx]["is_active"] = True + self.db_flows[flow_idx]["parent_opt_band"] = ob_id + self.db_flows[flow_idx]["new_optical_band"] = 0 + self.optical_bands[ob_id]["served_lightpaths"].append(flow_idx) + ''' + if bidir: + rev_ob_id = self.optical_bands[ob_id]["reverse_optical_band_id"] + self.optical_bands[rev_ob_id]["served_lightpaths"].append(flow_id) + ''' + self.move_flow(flow_idx, slots_init, band_type, links, bidir, ob_idx) + return flow_idx, ob_id + else: + continue + print("not enough slots") + return None, 0 + + def extend_optical_band(self, ob_id, band=None): ob = self.optical_bands[ob_id] links = ob["links"] diff --git 
a/src/opticalcontroller/dijkstra.py b/src/opticalcontroller/dijkstra.py index d26622ca73eb70b6c1dcea8e0fc5053463215ed6..0beb0fd1d8e113249d18be6a3d8d163f52a2e1b6 100644 --- a/src/opticalcontroller/dijkstra.py +++ b/src/opticalcontroller/dijkstra.py @@ -129,9 +129,20 @@ class Graph: self.vert_dict[frm].add_neighbor(self.vert_dict[to], [port_frm, w]) self.vert_dict[to].add_neighbor(self.vert_dict[frm], [port_to, w]) + ''' def del_edge(self, frm, to, cost = 0): self.vert_dict[frm].del_neighbor(self.vert_dict[to]) self.vert_dict[to].del_neighbor(self.vert_dict[frm]) + ''' + + def del_edge(self, frm, to, cost=0): + if frm in self.vert_dict and to in self.vert_dict: + v_from = self.vert_dict[frm] + v_to = self.vert_dict[to] + if v_to in v_from.adjacent: + v_from.del_neighbor(v_to) + if v_from in v_to.adjacent: + v_to.del_neighbor(v_from) def get_vertices(self): return self.vert_dict.keys() @@ -142,6 +153,45 @@ class Graph: def get_previous(self, current): return self.previous + def copy(self): + """ + Returns a deep copy of the graph (vertices, edges, ports, and weights). 
+ """ + new_graph = Graph() + + # First, create all vertices + for node_id in self.vert_dict: + new_graph.add_vertex(node_id) + + # Then, add all edges with the same attributes + for v in self: + for neighbor in v.get_connections(): + frm = v.get_id() + to = neighbor.get_id() + port_frm = v.get_port(neighbor) + port_to = neighbor.get_port(v) + weight = v.get_weight(neighbor) + + # To avoid adding the same undirected edge twice + if frm < to: + new_graph.add_edge(frm, to, port_frm, port_to, weight) + + return new_graph + + def copy2(self): + new_g = Graph() + # Copy vertices + for node_id in self.vert_dict: + new_g.add_vertex(node_id) + # Copy edges + for frm in self.vert_dict: + for to in self.vert_dict[frm].adjacent: + port_frm, weight = self.vert_dict[frm].adjacent[to] + port_to, _ = self.vert_dict[to].adjacent[frm] + if not new_g.get_vertex(frm).adjacent.get(new_g.get_vertex(to)): + new_g.add_edge(frm, to.get_id(), port_frm, port_to, weight) + return new_g + def shortest(v, path): if v.previous: path.append(v.previous.get_id()) @@ -198,6 +248,90 @@ def shortest_path(graph, src, dst): shortest(target, path) return path[::-1] + +def compute_disjoint_paths(graph, src, dst, k=2, disjoint_type="link", debug=False): + """ + Compute up to k disjoint shortest paths between src and dst using Dijkstra. 
+ disjoint_type: "link" (edge-disjoint) or "node" (vertex-disjoint) + """ + + paths = [] + removed_edges = [] # Keep track of removed edges + removed_nodes = [] # Keep track of removed nodes + + for i in range(k): + # Compute shortest path using the existing Dijkstra-based function + path = shortest_path(graph, src, dst) + + # Stop if no valid path found + if not path or len(path) < 2: + if debug: + print(f"[INFO] No more disjoint paths found after {i} iterations.") + break + + paths.append(path) + if debug: + print(f"[INFO] Path {i+1}: {path}") + + # Depending on disjointness type, remove edges or nodes from graph + if disjoint_type == "link": + for u, v in zip(path[:-1], path[1:]): + if debug: + print(f" Removing edge {u}-{v}") + removed_edges.append((u, v)) + # Remove edge in both directions + graph.del_edge(u, v) + + elif disjoint_type == "node": + # Remove intermediate nodes (not source or destination) + for n in path[1:-1]: + if debug: + print(f" Removing node {n}") + removed_nodes.append(n) + # Remove all edges involving this node + v = graph.get_vertex(n) + if v is not None: + for neighbor in list(v.get_connections()): + graph.del_edge(n, neighbor.get_id()) + graph.del_Vertex(n) + else: + raise ValueError("disjoint_type must be 'link' or 'node'") + + # Reset distances & visited flags for the next run + graph.reset_graph() + + if debug: + print(f"[INFO] Found {len(paths)} disjoint paths.") + + return paths + +def disjoint_path(graph, src_id, dst_id, pathz, debug=False): + g2 = graph.copy() + src = g2.get_vertex(src_id) + dst = g2.get_vertex(dst_id) + removed_edges = [] # Keep track of removed edges + removed_nodes = [] # Keep track of removed nodes + for u, v in zip(pathz[:-1], pathz[1:]): + if debug: + print(f" Removing edge {u}-{v}") + removed_edges.append((u, v)) + # Remove edge in both directions + g2.del_edge(u, v) + # Compute shortest path using the existing Dijkstra-based function + g2.reset_graph() + pathx = shortest_path(g2, src, dst) + + # Stop if 
no valid path found + if not pathx or len(pathx) < 2: + if debug: + print(f"[INFO] No more disjoint paths found.") + return [] + + g2.reset_graph() + + return pathx + + if __name__ == '__main__': print("Testing Algo") @@ -210,15 +344,15 @@ if __name__ == '__main__': g.add_vertex('e') g.add_vertex('f') - g.add_edge('a', 'b', 7) - g.add_edge('a', 'c', 9) - g.add_edge('a', 'f', 14) - g.add_edge('b', 'c', 10) - g.add_edge('b', 'd', 15) - g.add_edge('c', 'd', 11) - g.add_edge('c', 'f', 2) - g.add_edge('d', 'e', 6) - g.add_edge('e', 'f', 9) + g.add_edge('a', 'b', 1, 1, 7) + g.add_edge('a', 'c', 2, 1, 9) + g.add_edge('a', 'f', 3, 1, 14) + g.add_edge('b', 'c', 2, 2, 10) + g.add_edge('b', 'd', 3, 1, 15) + g.add_edge('c', 'd', 3, 2, 11) + g.add_edge('c', 'f', 4, 2, 2) + g.add_edge('d', 'e', 4, 1, 6) + g.add_edge('e', 'f', 2, 3, 9) """print ('Graph data:') @@ -235,6 +369,17 @@ if __name__ == '__main__': path = [target.get_id()] shortest(target, path) print ('The shortest path : %s' %(path[::-1]))""" - - p = shortest_path(g, g.get_vertex('a'), g.get_vertex('e')) - print(p) + #print(g.printGraph()) + pat = shortest_path(g, g.get_vertex('a'), g.get_vertex('e')) + print(pat) + + #paths = compute_disjoint_paths(g, g.get_vertex('a'), g.get_vertex('e'), k=2, disjoint_type="link", debug=False) + #paths = compute_disjoint_paths(g, g.get_vertex('a'), g.get_vertex('e'), k=2, disjoint_type="link", debug=False) + #print(paths) + path2 = compute_disjoint_path(g, 'a', 'e', pat, False) + print(path2) + + pat = shortest_path(g, g.get_vertex('a'), g.get_vertex('d')) + print(pat) + path2 = compute_disjoint_path(g, 'a', 'd', pat, False) + print(path2) \ No newline at end of file diff --git a/src/opticalcontroller/tools.py b/src/opticalcontroller/tools.py index b9a3e79b6f95fc7e2723716f23bb7e710866ec2e..dfca580f6cfe80646fa5de2c9388e499a5353029 100644 --- a/src/opticalcontroller/tools.py +++ b/src/opticalcontroller/tools.py @@ -242,7 +242,7 @@ def get_links_to_node(topology, node): return result 
-def slot_selection(c, l, s, n_slots, Nc, Nl, Ns): +def slot_selection(c, l, s, n_slots, Nc, Nl, Ns, preferred=None): # First Fit if isinstance(n_slots, int): @@ -253,14 +253,47 @@ def slot_selection(c, l, s, n_slots, Nc, Nl, Ns): slot_c = Nc slot_l = Nl slot_s = Ns - if len(c) >= slot_c: - return "c_slots", c[0: slot_c] - elif len(l) >= slot_l: - return "l_slots", l[0: slot_l] - elif len(s) >= slot_s: - return "s_slots", s[0: slot_s] + if preferred == None or preferred == "ANY": + if len(c) >= slot_c: + return "c_slots", c[0: slot_c] + elif len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(s) >= slot_s: + return "s_slots", s[0: slot_s] + else: + return None, None else: - return None, None + if preferred == "C_BAND": + if len(c) >= slot_c: + return "c_slots", c[0: slot_c] + elif len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(s) >= slot_s: + return "s_slots", s[0: slot_s] + else: + return None, None + elif preferred == "L_BAND": + if len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(c) >= slot_c: + return "c_slots", c[0: slot_c] + elif len(s) >= slot_s: + return "s_slots", s[0: slot_s] + else: + return None, None + elif preferred == "S_BAND": + if len(s) >= slot_s: + return "s_slots", s[0: slot_s] + elif len(l) >= slot_l: + return "l_slots", l[0: slot_l] + elif len(c) >= slot_c: + return "c_slots", c[0: slot_c] + else: + return None, None + else: + logging.info("PDP: wrong preferred value") + return None, None + def handle_slot (slot_field, slot): for key,value in slot.items() : diff --git a/src/opticalcontroller/variables.py b/src/opticalcontroller/variables.py index 23fbaad24111df3746dc184288589476075f1c30..d271ddeca4ee448898cdf079d8afa65c67676904 100644 --- a/src/opticalcontroller/variables.py +++ b/src/opticalcontroller/variables.py @@ -23,4 +23,4 @@ Nc = 320 #Nc = 10 Ns = 720 -full_links = 0 +full_links = 1 diff --git a/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java
b/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java index 074464cf42bd1d94fe9d6494e985cfd345034219..e3841139e106ed0ea120a3b1d315494902b58b20 100644 --- a/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java +++ b/src/policy/src/main/java/org/etsi/tfs/policy/policy/CommonPolicyServiceImpl.java @@ -393,6 +393,8 @@ public class CommonPolicyServiceImpl { final var policyRuleTypeService = new PolicyRuleTypeService(policyRuleService); final var policyRule = new PolicyRule(policyRuleTypeService); contextService.setPolicyRule(policyRule).subscribe().with(x -> {}); + + LOGGER.infof("Policy Rule state is now [%s]", policyRuleState.toString()); } public void setPolicyRuleDeviceToContext( @@ -404,6 +406,6 @@ public class CommonPolicyServiceImpl { final var policyRuleTypeService = new PolicyRuleTypeDevice(policyRuleDevice); final var policyRule = new PolicyRule(policyRuleTypeService); - contextService.setPolicyRule(policyRule).subscribe().with(x -> {}); + final var policyRuleId = contextService.setPolicyRule(policyRule).subscribe().with(x -> {}); } } diff --git a/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java b/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java index 0c98ddbb4625c12c345226ec5654e475188619f9..3f4a7a2a6d81affb2837cc465268d9085a134466 100644 --- a/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java +++ b/src/policy/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java @@ -71,6 +71,14 @@ public final class KpiSampleTypes { * KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; */ KPISAMPLETYPE_OPTICAL_SECURITY_STATUS(501), + /** + * KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT(502), + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER(503), /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -280,6 +288,16 @@ public 
final class KpiSampleTypes { */ public static final int KPISAMPLETYPE_OPTICAL_SECURITY_STATUS_VALUE = 501; + /** + * KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + public static final int KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT_VALUE = 502; + + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + public static final int KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER_VALUE = 503; + /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -503,6 +521,10 @@ public final class KpiSampleTypes { return KPISAMPLETYPE_ML_CONFIDENCE; case 501: return KPISAMPLETYPE_OPTICAL_SECURITY_STATUS; + case 502: + return KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT; + case 503: + return KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER; case 601: return KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS; case 602: @@ -628,7 +650,7 @@ public final class KpiSampleTypes { private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { - java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\346\r\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022)\n$KPISA" + "MPLETYPE_L3_UNIQUE_ATTACK_CONNS\020\331\004\022*\n%KP" + "ISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS\020\332\004\022&" + "\n!KPISAMPLETYPE_L3_UNIQUE_ATTACKERS\020\333\004\0220" + "\n+KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CL" + "IENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3_SECURITY_ST" + "ATUS_CRYPTO\020\335\004\022%\n 
KPISAMPLETYPE_SERVICE_" + "LATENCY_MS\020\275\005\0221\n,KPISAMPLETYPE_PACKETS_T" + "RANSMITTED_AGG_OUTPUT\020\315\010\022.\n)KPISAMPLETYP" + "E_PACKETS_RECEIVED_AGG_OUTPUT\020\316\010\022-\n(KPIS" + "AMPLETYPE_PACKETS_DROPPED_AGG_OUTPUT\020\317\010\022" + "/\n*KPISAMPLETYPE_BYTES_TRANSMITTED_AGG_O" + "UTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BYTES_RECEIVED" + "_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLETYPE_BYTES_DR" + "OPPED_AGG_OUTPUT\020\263\t\0220\n+KPISAMPLETYPE_SER" + "VICE_LATENCY_MS_AGG_OUTPUT\020\245\r\022\036\n\031KPISAMP" + "LETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPISAMPLETYPE_I" + "NT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYPE_INT_TS_EGR" + "\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP_LAT\020\324\017\022\"\n\035K" + "PISAMPLETYPE_INT_PORT_ID_ING\020\325\017\022\"\n\035KPISA" + "MPLETYPE_INT_PORT_ID_EGR\020\326\017\022\"\n\035KPISAMPLE" + "TYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032KPISAMPLETYPE" + "_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW01\020\265\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "4\020\270\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW05\020\271" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW06\020\272\020\022#" + "\n\036KPISAMPLETYPE_INT_HOP_LAT_SW07\020\273\020\022#\n\036K" + "PISAMPLETYPE_INT_HOP_LAT_SW08\020\274\020\022#\n\036KPIS" + "AMPLETYPE_INT_HOP_LAT_SW09\020\275\020\022#\n\036KPISAMP" + "LETYPE_INT_HOP_LAT_SW10\020\276\020\022#\n\036KPISAMPLET" + "YPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031KPISAMPLETYPE" + "_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLETYPE_INT_DRO" + "P_REASON\020\232\021b\006proto3" }; + java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\302\016\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + 
"\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022,\n\'KPISA" + "MPLETYPE_OPTICAL_POWER_TOTAL_INPUT\020\366\003\022,\n" + "\'KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER" + "\020\367\003\022)\n$KPISAMPLETYPE_L3_UNIQUE_ATTACK_CO" + "NNS\020\331\004\022*\n%KPISAMPLETYPE_L3_TOTAL_DROPPED" + "_PACKTS\020\332\004\022&\n!KPISAMPLETYPE_L3_UNIQUE_AT" + "TACKERS\020\333\004\0220\n+KPISAMPLETYPE_L3_UNIQUE_CO" + "MPROMISED_CLIENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3" + "_SECURITY_STATUS_CRYPTO\020\335\004\022%\n KPISAMPLET" + "YPE_SERVICE_LATENCY_MS\020\275\005\0221\n,KPISAMPLETY" + "PE_PACKETS_TRANSMITTED_AGG_OUTPUT\020\315\010\022.\n)" + "KPISAMPLETYPE_PACKETS_RECEIVED_AGG_OUTPU" + "T\020\316\010\022-\n(KPISAMPLETYPE_PACKETS_DROPPED_AG" + "G_OUTPUT\020\317\010\022/\n*KPISAMPLETYPE_BYTES_TRANS" + "MITTED_AGG_OUTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BY" + "TES_RECEIVED_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLET" + "YPE_BYTES_DROPPED_AGG_OUTPUT\020\263\t\0220\n+KPISA" + "MPLETYPE_SERVICE_LATENCY_MS_AGG_OUTPUT\020\245" + "\r\022\036\n\031KPISAMPLETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPI" + "SAMPLETYPE_INT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYP" + "E_INT_TS_EGR\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP" + "_LAT\020\324\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_ING" + "\020\325\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_EGR\020\326\017\022" + "\"\n\035KPISAMPLETYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032K" + "PISAMPLETYPE_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPL" + "ETYPE_INT_HOP_LAT_SW01\020\265\020\022#\n\036KPISAMPLETY" + 
"PE_INT_HOP_LAT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_" + "INT_HOP_LAT_SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT" + "_HOP_LAT_SW04\020\270\020\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW05\020\271\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW06\020\272\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW07\020\273\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "8\020\274\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW09\020\275" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW10\020\276\020\022#" + "\n\036KPISAMPLETYPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031K" + "PISAMPLETYPE_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLE" + "TYPE_INT_DROP_REASON\020\232\021b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); } // @@protoc_insertion_point(outer_class_scope) diff --git a/src/policy/target/kubernetes/kubernetes.yml b/src/policy/target/kubernetes/kubernetes.yml index cd578b34bd6b71b9b3c89e2c0b83599e1f9d5eb0..2af0c358781197d297972a75e2570d263e15c406 100644 --- a/src/policy/target/kubernetes/kubernetes.yml +++ b/src/policy/target/kubernetes/kubernetes.yml @@ -3,8 +3,8 @@ apiVersion: v1 kind: Service metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-21 - 15:35:10 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:42:32 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -37,8 +37,8 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-21 - 15:35:10 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:42:32 +0000 prometheus.io/scrape: "true" prometheus.io/path: 
/q/metrics prometheus.io/port: "8080" @@ -57,8 +57,8 @@ spec: template: metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-21 - 15:35:10 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:42:32 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -75,14 +75,14 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: SERVICE_SERVICE_HOST + value: serviceservice + - name: MONITORING_SERVICE_HOST + value: monitoringservice - name: KAFKA_BROKER_HOST value: kafka-service.kafka.svc.cluster.local - name: CONTEXT_SERVICE_HOST value: contextservice - - name: MONITORING_SERVICE_HOST - value: monitoringservice - - name: SERVICE_SERVICE_HOST - value: serviceservice image: labs.etsi.org:5050/tfs/controller/policy:0.1.0 imagePullPolicy: Always livenessProbe: diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py index 52f2fd45b139ee4e6a6640152ff8ec58f8a90145..272174d9924baad575493b9617a7acfd2035dc2f 100644 --- a/src/service/service/ServiceServiceServicerImpl.py +++ b/src/service/service/ServiceServiceServicerImpl.py @@ -21,7 +21,7 @@ from common.method_wrappers.ServiceExceptions import ( ) from common.proto.context_pb2 import ( Connection, ConstraintActionEnum, Empty, Service, ServiceId, ServiceStatusEnum, - ServiceTypeEnum, TopologyId + ServiceTypeEnum, TopologyId, ContextId ) from common.proto.pathcomp_pb2 import PathCompRequest from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest @@ -44,11 +44,12 @@ from .task_scheduler.TaskScheduler import TasksScheduler from .tools.GeodesicDistance import gps_distance from .tools.OpticalTools import ( add_flex_lightpath, add_lightpath, delete_lightpath, adapt_reply, get_device_name_from_uuid, - get_optical_band, refresh_opticalcontroller, DelFlexLightpath , 
extend_optical_band - + get_optical_band, refresh_opticalcontroller, DelFlexLightpath , extend_optical_band, + reconfig_flex_lightpath, adapt_reply_ob, add_alien_flex_lightpath ) + LOGGER = logging.getLogger(__name__) METRICS_POOL = MetricsPool('Service', 'RPC') @@ -277,7 +278,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ports = [] for endpoint_id in service.service_endpoint_ids: endpoint_device_uuid = endpoint_id.device_id.device_uuid.uuid - if "." in endpoint_device_uuid: + if "." in endpoint_device_uuid or "MGON" in endpoint_device_uuid: endpoint_device_name = endpoint_device_uuid else: endpoint_device_name = device_names[endpoint_device_uuid] @@ -290,30 +291,75 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ob_band = None oc_type = 1 bitrate = 100 + dj_optical_band_id = None + preferred = "ANY" + alien = 0 + alien_band = 0 + alien_optical_band_id = 0 for constraint in service.service_constraints: - if "bandwidth" in constraint.custom.constraint_type: - bitrate = int(float(constraint.custom.constraint_value)) - elif "bidirectionality" in constraint.custom.constraint_type: - bidir = int(constraint.custom.constraint_value) - elif "optical-band-width" in constraint.custom.constraint_type: - ob_band = int(constraint.custom.constraint_value) - elif "type" in constraint.custom.constraint_type: - oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) - + if "alien" in constraint.custom.constraint_type: + alien = 1 + break + for constraint in service.service_constraints: + if alien == 1: + if "alien_spectrum" in constraint.custom.constraint_type: + alien_band = int(constraint.custom.constraint_value) + elif "optical_band_id" in constraint.custom.constraint_type: + alien_optical_band_id = int(constraint.custom.constraint_value) + elif "bidirectionality" in constraint.custom.constraint_type: + bidir = int(constraint.custom.constraint_value) + else: + if "bandwidth" in constraint.custom.constraint_type: + bitrate = 
int(float(constraint.custom.constraint_value)) + elif "bidirectionality" in constraint.custom.constraint_type: + bidir = int(constraint.custom.constraint_value) + elif "optical-band-width" in constraint.custom.constraint_type: + ob_band = int(constraint.custom.constraint_value) + elif "type" in constraint.custom.constraint_type: + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) + logging.info(f"{oc_type}") + elif "disjoint_optical_band_id" in constraint.custom.constraint_type: + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + dj_optical_band_id = int(constraint.custom.constraint_value) + logging.info(f"{dj_optical_band_id}") + elif "preferred_band" in constraint.custom.constraint_type: + #used only for optical band specification + logging.info(f"{constraint.custom.constraint_type}={constraint.custom.constraint_value}") + preferred = str(constraint.custom.constraint_value) + logging.info(f"{preferred}") + elif "optical_band_id" in constraint.custom.constraint_type: + #re-use the same tag for the preferred optical band with media-channel + preferred = int(constraint.custom.constraint_value) reply_txt = "" # to get the reply form the optical module #multi-granular - if oc_type == 1: - reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band) - elif oc_type == 2: - reply_txt = add_lightpath(src, dst, bitrate, bidir) + if alien != 0: + reply_txt = add_alien_flex_lightpath(src, ports[0], dst, ports[1], alien_band, alien_optical_band_id, bidir) else: - reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, ob_band) - + if oc_type == 1: + reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, preferred, ob_band, dj_optical_band_id) + elif oc_type == 2: + reply_txt = add_lightpath(src, dst, bitrate, bidir) + else: + reply_txt = add_flex_lightpath(src, dst, bitrate, bidir, preferred, ob_band, 
dj_optical_band_id) + if reply_txt is None: + return service_with_uuids.service_id reply_json = json.loads(reply_txt) LOGGER.debug('[optical] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) optical_band_txt = "" + + if "optical_band_id" in reply_json.keys(): + optical_band_txt = reply_txt + + optical_reply = adapt_reply_ob( + devices, _service, reply_json, context_uuid_x, topology_uuid_x, optical_band_txt + ) + tasks_scheduler.compose_from_opticalcontroller_reply( + optical_reply, is_delete=False) + tasks_scheduler.execute_all() + return service_with_uuids.service_id if "new_optical_band" in reply_json.keys(): if reply_json["new_optical_band"] == 1: if reply_json["parent_opt_band"]: @@ -403,7 +449,8 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): 'flow_id' : None } devs = [] - + src = "" + dst = "" context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) topology_id_x = json_topology_id( DEFAULT_TOPOLOGY_NAME, context_id_x) @@ -412,8 +459,11 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): devices = topology_details.devices for endpoint_id in service.service_endpoint_ids: devs.append(endpoint_id.device_id.device_uuid.uuid) - src = get_device_name_from_uuid(devices, devs[0]) - dst = get_device_name_from_uuid(devices, devs[1]) + if len(devs) == 2: + src = get_device_name_from_uuid(devices, devs[0]) + dst = get_device_name_from_uuid(devices, devs[1]) + else: + print("empty service") bitrate = 100 bidir = 0 oc_type = 1 @@ -430,6 +480,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): ''' #multi-granular if oc_type == 1: + LOGGER.info(f"DEVELOP: deleting multi-granular service") if len(service.service_config.config_rules) > 0: c_rules_dict = json.loads( service.service_config.config_rules[0].custom.resource_value @@ -448,12 +499,12 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): params['ob_id' ] = ob_id params['flow_id'] = flow_id params['bidir' ] = bidir - - + LOGGER.info(f"DEVELOP mg: {params}") 
tasks_scheduler = TasksScheduler(self.service_handler_factory) tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) tasks_scheduler.execute_all() return Empty() + #flexigrid elif oc_type ==2 : if len(service.service_config.config_rules) > 0: @@ -469,16 +520,15 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): params['bitrate']=bitrate params['dst']=dst params['src']=src - params['ob_id']=ob_id params['flow_id']=flow_id params['bidir'] = bidir - - + LOGGER.info(f"DEVELOP flexgrid: {params}") tasks_scheduler = TasksScheduler(self.service_handler_factory) tasks_scheduler.compose_from_optical_service(service, params=params, is_delete=True) tasks_scheduler.execute_all() return Empty() + # Normal service # Feed TaskScheduler with this service and the sub-services and sub-connections related to this service. # TaskScheduler identifies inter-dependencies among them and produces a schedule of tasks (an ordered list of @@ -490,6 +540,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) def RecomputeConnections(self, request : Service, context : grpc.ServicerContext) -> Empty: + if len(request.service_endpoint_ids) > 0: raise NotImplementedException('update-endpoints') @@ -497,7 +548,9 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): raise NotImplementedException('update-constraints') if len(request.service_config.config_rules) > 0: - raise NotImplementedException('update-config-rules') + del request.service_config.config_rules[:] + # raise NotImplementedException('update-config-rules') + LOGGER.error('update-config-rules not implemented') context_client = ContextClient() @@ -517,7 +570,8 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): # Set service status to "SERVICESTATUS_UPDATING" to ensure rest of components are aware the service is # being modified. 
# pylint: disable=no-member - updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING + if updated_service.service_type != ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING # Update endpoints # pylint: disable=no-member @@ -564,83 +618,229 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): str_connection = grpc_message_to_json_string(old_connection) str_extra_details = MSG.format(str_service_id, str_connection_id, str_connection) raise NotImplementedException('service-connection-with-subservices', extra_details=str_extra_details) + + if updated_service.service_type == ServiceTypeEnum.SERVICETYPE_OPTICAL_CONNECTIVITY: + context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) + topology_id_x = json_topology_id( + DEFAULT_TOPOLOGY_NAME, context_id_x) + topology_details = context_client.GetTopologyDetails( + TopologyId(**topology_id_x)) + + str_old_connection = connection_to_string(old_connection) + if len(updated_service_with_uuids.service_config.config_rules)> 0: + #if len(updated_service.service_config.config_rules) > 0: + c_rules_dict = json.loads( + updated_service_with_uuids.service_config.config_rules[0].custom.resource_value) + + #c_rules_dict = json.loads( + # updated_service.service_config.config_rules[0].custom.resource_value) + flow_id=None + #if "ob_id" in c_rules_dict: + # ob_id = c_rules_dict["ob_id"] + if ("flow_id" in c_rules_dict): + updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING + flow_id = c_rules_dict["flow_id"] + reply_txt = "" + # to get the reply form the optical module + #multi-granular + reply_txt = reconfig_flex_lightpath(flow_id) + reply_json = json.loads(reply_txt) + LOGGER.info('[optical] reply_json[{:s}]={:s}'.format(str(type(reply_json)), str(reply_json))) + devices = topology_details.devices + context_uuid_x = topology_details.topology_id.context_id.context_uuid.uuid 
+ topology_uuid_x = topology_details.topology_id.topology_uuid.uuid + + device_names : Dict[str, str] = dict() + for device in devices: + device_uuid = device.device_id.device_uuid.uuid + device_names[device_uuid] = device.name + + if reply_txt != '': + optical_reply = adapt_reply(devices, updated_service, reply_json, context_uuid_x, topology_uuid_x, "") + new_connection = optical_reply.connections[0] + #for candidate_new_connection in pathcomp_reply.connections: + str_candidate_new_connection = connection_to_string(new_connection) + # Change UUID of new connection to prevent collisions + tmp_connection = Connection() + tmp_connection.CopyFrom(new_connection) + tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) + new_connection = tmp_connection + service_new = optical_reply.services[0] + # Feed TaskScheduler with the service to update, the old connection to + # deconfigure and the new connection to configure. It will produce a + # schedule of tasks (an ordered list of tasks to be executed) to + # implement the requested changes. 
+ tasks_scheduler = TasksScheduler(self.service_handler_factory) + #tasks_scheduler.compose_optical_service_update( + # updated_service, old_connection, service_new, new_connection) + tasks_scheduler.compose_optical_service_update( + service_new, old_connection, new_connection) + tasks_scheduler.execute_all() + else: + if ("ob_id" in c_rules_dict) and ("low-freq" in c_rules_dict): + ob_id = c_rules_dict["ob_id"] + band_txt = get_optical_band(ob_id) + optical_band = json.loads(band_txt) + ''' + optical_band = None + obs = context_client.GetOpticalBand() + for obz in obs: + if obz.opticalband_id == ob_id: + optical_band = obz + if optical_band is not None: + ''' + #optical_band = context_client.SelectOpticalBand(ob_id) + served_flows = optical_band.get('served_lightpaths') + #context_id_x = json_context_id(DEFAULT_CONTEXT_NAME) + response = context_client.ListServices(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) + #response = context_client.ListServices(context_id_x) + LOGGER.info('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + #assert len(response.services) == 1 + service_ids = [] + for service in response.services: + ########## + #service = response.services[0] + if len(service.service_config.config_rules) > 0: + c_rules_dict = json.loads( + service.service_config.config_rules[0].custom.resource_value + ) + if ("flow_id" in c_rules_dict): + flow_id = c_rules_dict["flow_id"] + LOGGER.info(f"Checking {flow_id} and {served_flows}") + if flow_id in served_flows: + ########## + updated_service : Optional[Service] = get_service_by_id( + context_client, service.service_id, rw_copy=True, + include_config_rules=False, include_constraints=False, include_endpoint_ids=False) + updated_service.service_status.service_status = ServiceStatusEnum.SERVICESTATUS_UPDATING + updated_service_id_with_uuids = context_client.SetService(updated_service) + + # PathComp requires endpoints, constraints and config rules + 
updated_service_with_uuids = get_service_by_id( + context_client, updated_service_id_with_uuids, rw_copy=True, + include_config_rules=True, include_constraints=True, include_endpoint_ids=True) + + # Get active connection + connections = context_client.ListConnections(updated_service_id_with_uuids) + old_connection = connections.connections[0] + + flow_id = c_rules_dict["flow_id"] + reply_txt = "" + # to get the reply form the optical module + #multi-granular + reply_txt = reconfig_flex_lightpath(flow_id) + reply_json = json.loads(reply_txt) + devices = topology_details.devices + context_uuid_x = topology_details.topology_id.context_id.context_uuid.uuid + topology_uuid_x = topology_details.topology_id.topology_uuid.uuid + + device_names : Dict[str, str] = dict() + for device in devices: + device_uuid = device.device_id.device_uuid.uuid + device_names[device_uuid] = device.name + + if reply_txt != '': + optical_reply = adapt_reply(devices, updated_service, reply_json, context_uuid_x, topology_uuid_x, "") + new_connection = optical_reply.connections[0] + #for candidate_new_connection in pathcomp_reply.connections: + str_candidate_new_connection = connection_to_string(new_connection) + # Change UUID of new connection to prevent collisions + tmp_connection = Connection() + tmp_connection.CopyFrom(new_connection) + tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) + new_connection = tmp_connection + service_new = optical_reply.services[0] + + + # Feed TaskScheduler with the service to update, the old connection to + # deconfigure and the new connection to configure. It will produce a + # schedule of tasks (an ordered list of tasks to be executed) to + # implement the requested changes. 
+ tasks_scheduler = TasksScheduler(self.service_handler_factory) + #tasks_scheduler.compose_optical_service_update( + # updated_service, old_connection, service_new, new_connection) + tasks_scheduler.compose_optical_service_update( + service_new, old_connection, new_connection) + tasks_scheduler.execute_all() + - # Find alternative connections - # pylint: disable=no-member - pathcomp_request = PathCompRequest() - pathcomp_request.services.append(updated_service_with_uuids) - #pathcomp_request.k_disjoint_path.num_disjoint = 100 - pathcomp_request.k_shortest_path.k_inspection = 100 - pathcomp_request.k_shortest_path.k_return = 3 - - LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) - pathcomp = PathCompClient() - pathcomp_reply = pathcomp.Compute(pathcomp_request) - pathcomp.close() - LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) - - if len(pathcomp_reply.services) == 0: - MSG = 'KDisjointPath reported no services for Service({:s}): {:s}' - str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) - raise NotImplementedException('kdisjointpath-no-services', extra_details=str_extra_details) + else: + # Find alternative connections + # pylint: disable=no-member + pathcomp_request = PathCompRequest() + pathcomp_request.services.append(updated_service_with_uuids) + #pathcomp_request.k_disjoint_path.num_disjoint = 100 + pathcomp_request.k_shortest_path.k_inspection = 100 + pathcomp_request.k_shortest_path.k_return = 3 + + LOGGER.debug('pathcomp_request={:s}'.format(grpc_message_to_json_string(pathcomp_request))) + pathcomp = PathCompClient() + pathcomp_reply = pathcomp.Compute(pathcomp_request) + pathcomp.close() + LOGGER.debug('pathcomp_reply={:s}'.format(grpc_message_to_json_string(pathcomp_reply))) + + if len(pathcomp_reply.services) 
== 0: + MSG = 'KDisjointPath reported no services for Service({:s}): {:s}' + str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) + raise NotImplementedException('kdisjointpath-no-services', extra_details=str_extra_details) + + if len(pathcomp_reply.services) > 1: + MSG = 'KDisjointPath reported subservices for Service({:s}): {:s}' + str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) + raise NotImplementedException('kdisjointpath-subservices', extra_details=str_extra_details) + + if len(pathcomp_reply.connections) == 0: + MSG = 'KDisjointPath reported no connections for Service({:s}): {:s}' + str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) + str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) + str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) + raise NotImplementedException('kdisjointpath-no-connections', extra_details=str_extra_details) + + # compute a string representing the old connection + str_old_connection = connection_to_string(old_connection) + + LOGGER.debug('old_connection={:s}'.format(grpc_message_to_json_string(old_connection))) + + candidate_new_connections = list() + for candidate_new_connection in pathcomp_reply.connections: + str_candidate_new_connection = connection_to_string(candidate_new_connection) + if str_candidate_new_connection == str_old_connection: continue + candidate_new_connections.append(candidate_new_connection) + + if len(candidate_new_connections) == 0: + MSG = 'Unable to find a new suitable path: pathcomp_request={:s} pathcomp_reply={:s} old_connection={:s}' + str_pathcomp_request = grpc_message_to_json_string(pathcomp_request) + str_pathcomp_reply = 
grpc_message_to_json_string(pathcomp_reply) + str_old_connection = grpc_message_to_json_string(old_connection) + extra_details = MSG.format(str_pathcomp_request, str_pathcomp_reply, str_old_connection) + raise OperationFailedException('no-new-path-found', extra_details=extra_details) + + str_candidate_new_connections = [ + grpc_message_to_json_string(candidate_new_connection) + for candidate_new_connection in candidate_new_connections + ] + LOGGER.debug('candidate_new_connections={:s}'.format(str(str_candidate_new_connections))) - if len(pathcomp_reply.services) > 1: - MSG = 'KDisjointPath reported subservices for Service({:s}): {:s}' - str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) - raise NotImplementedException('kdisjointpath-subservices', extra_details=str_extra_details) + new_connection = random.choice(candidate_new_connections) + LOGGER.debug('new_connection={:s}'.format(grpc_message_to_json_string(new_connection))) - if len(pathcomp_reply.connections) == 0: - MSG = 'KDisjointPath reported no connections for Service({:s}): {:s}' - str_service_id = grpc_message_to_json_string(updated_service_id_with_uuids) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_extra_details = MSG.format(str_service_id, str_pathcomp_reply) - raise NotImplementedException('kdisjointpath-no-connections', extra_details=str_extra_details) - - # compute a string representing the old connection - str_old_connection = connection_to_string(old_connection) - - LOGGER.debug('old_connection={:s}'.format(grpc_message_to_json_string(old_connection))) - - candidate_new_connections = list() - for candidate_new_connection in pathcomp_reply.connections: - str_candidate_new_connection = connection_to_string(candidate_new_connection) - if str_candidate_new_connection == str_old_connection: continue - 
candidate_new_connections.append(candidate_new_connection) - - if len(candidate_new_connections) == 0: - MSG = 'Unable to find a new suitable path: pathcomp_request={:s} pathcomp_reply={:s} old_connection={:s}' - str_pathcomp_request = grpc_message_to_json_string(pathcomp_request) - str_pathcomp_reply = grpc_message_to_json_string(pathcomp_reply) - str_old_connection = grpc_message_to_json_string(old_connection) - extra_details = MSG.format(str_pathcomp_request, str_pathcomp_reply, str_old_connection) - raise OperationFailedException('no-new-path-found', extra_details=extra_details) - - str_candidate_new_connections = [ - grpc_message_to_json_string(candidate_new_connection) - for candidate_new_connection in candidate_new_connections - ] - LOGGER.debug('candidate_new_connections={:s}'.format(str(str_candidate_new_connections))) - - new_connection = random.choice(candidate_new_connections) - LOGGER.debug('new_connection={:s}'.format(grpc_message_to_json_string(new_connection))) - - # Change UUID of new connection to prevent collisions - tmp_connection = Connection() - tmp_connection.CopyFrom(new_connection) - tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) - new_connection = tmp_connection - - # Feed TaskScheduler with the service to update, the old connection to - # deconfigure and the new connection to configure. It will produce a - # schedule of tasks (an ordered list of tasks to be executed) to - # implement the requested changes. 
- tasks_scheduler = TasksScheduler(self.service_handler_factory) - tasks_scheduler.compose_service_connection_update( - updated_service_with_uuids, old_connection, new_connection) - tasks_scheduler.execute_all() + # Change UUID of new connection to prevent collisions + tmp_connection = Connection() + tmp_connection.CopyFrom(new_connection) + tmp_connection.connection_id.connection_uuid.uuid = str(uuid.uuid4()) + new_connection = tmp_connection + + # Feed TaskScheduler with the service to update, the old connection to + # deconfigure and the new connection to configure. It will produce a + # schedule of tasks (an ordered list of tasks to be executed) to + # implement the requested changes. + tasks_scheduler = TasksScheduler(self.service_handler_factory) + tasks_scheduler.compose_service_connection_update( + updated_service_with_uuids, old_connection, new_connection) + tasks_scheduler.execute_all() return Empty() diff --git a/src/service/service/service_handlers/oc/OCServiceHandler.py b/src/service/service/service_handlers/oc/OCServiceHandler.py index 8aad5b17afabbbac11dae3a0b58edb7c33771424..7997aba0412f546082a5b01237fec0c1f507c29f 100644 --- a/src/service/service/service_handlers/oc/OCServiceHandler.py +++ b/src/service/service/service_handlers/oc/OCServiceHandler.py @@ -93,14 +93,16 @@ class OCServiceHandler(_ServiceHandler): settings = self.__settings_handler.get('/settings') bidir = settings.value.get("bidir") + op_mode = settings.value.get("operational-mode") ob_expansion =settings.value.get('ob-expanded',None) if ob_expansion : if not is_opticalband: LOGGER.info(f"ob-expanded bvalue is: {ob_expansion} and is_opticalband {is_opticalband}") return results - - flows = endpoints_to_flows(endpoints, bidir, is_opticalband) - + LOGGER.info(f"PDPis_opticalband {is_opticalband}") + LOGGER.info(f"PDPset_Opticalconfig_endpoints is:{endpoints}") + flows = endpoints_to_flows(endpoints, bidir, is_opticalband, op_mode) + LOGGER.info(f"PDPflows: {flows}") #new cycle for 
setting optical devices for device_uuid, dev_flows in flows.items(): try: @@ -125,14 +127,17 @@ class OCServiceHandler(_ServiceHandler): service_uuid = self.__service.service_id.service_uuid.uuid chk_type('endpoints', endpoints, list) if len(endpoints) == 0: return [] - + op_mode = None if self.__settings_handler.get('/settings-ob_{}'.format(connection_uuid)): is_opticalband =True settings = self.__settings_handler.get('/settings-ob_{}'.format(connection_uuid)) else: - settings = self.__settings_handler.get('/settings') + settings = self.__settings_handler.get('/settings') + op_mode = settings.value.get("operational-mode") + bidir = settings.value.get("bidir",None) + results = [] for endpoint in endpoints: @@ -166,7 +171,10 @@ class OCServiceHandler(_ServiceHandler): if is_openroadm: flows = convert_or_endpoints_to_flows(endpoints, bidir) else: - flows = endpoints_to_flows(endpoints, bidir, is_opticalband) + LOGGER.info(f"is_opticalband {is_opticalband}") + LOGGER.info(f'RERF endpoints :{endpoints}') + flows = endpoints_to_flows(endpoints, bidir, is_opticalband, op_mode) + LOGGER.info(f'RERF:{flows}') for device_uuid, dev_flows in flows.items(): try: diff --git a/src/service/service/service_handlers/oc/OCTools.py b/src/service/service/service_handlers/oc/OCTools.py index 14cd7cbedd1c0aa4390067f7244baead946b84a9..746c29fdb30771bdb6c1531054a346aa1ec8149e 100644 --- a/src/service/service/service_handlers/oc/OCTools.py +++ b/src/service/service/service_handlers/oc/OCTools.py @@ -286,88 +286,189 @@ def ob_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): return entries -def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int): - entries = {} - end = len(endpoints) - i = 0 - #tx tp - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] - - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - entry_tuple = "0", endpoint_uuid - entries[device_uuid].append(entry_tuple) - i = i + 1 - #if bidir reading 4 
endpoints per node - if bidir: - log.info(f"i starts with {i} ") - i = i + 1 - while(i < end-2): - #i - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] +def conn_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int, op_mode: int): + if op_mode is not None: + entries = {} + end = len(endpoints) + i = 0 + #tx tp + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - #i+1 - next_endpoint = endpoints[i+1] - next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] - if next_device_uuid == device_uuid: - entry_tuple = endpoint_uuid, next_endpoint_uuid - entries[device_uuid].append(entry_tuple) - else: + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + entry_tuple = "0", endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 1 + #if bidir reading 4 endpoints per node + if bidir: + log.info(f"i starts with {i} ") + device0 , endpoint0=endpoints[0][0:2] + device1 , endpoint1=endpoints[1][0:2] + finalend=end-2 + if device0 ==device1: + i = i + 1 + else : + finalend=end-1 + while(i < finalend): + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] - return {} - #i+2 - - next_2_endpoint = endpoints[i+2] - next_2_device_uuid, next_2_endpoint_uuid = next_2_endpoint[0:2] - #i+3 - next_3_endpoint = endpoints[i+3] - next_3_device_uuid, next_3_endpoint_uuid = next_3_endpoint[0:2] - log.info(f"dev {device_uuid} ") - log.info(f"dev2 {next_2_device_uuid} dev3 {next_3_device_uuid} ") - if next_2_device_uuid == next_3_device_uuid and next_3_device_uuid == device_uuid: - entry_tuple = next_2_endpoint_uuid, next_3_endpoint_uuid - entries[device_uuid].append(entry_tuple) - i = i + 4 - else: + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = 
endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + else: + log.info(f"error : next_dev {next_device_uuid} dev {device_uuid} for i {i} ") + return {} + #i+2 + + next_2_endpoint = endpoints[i+2] + next_2_device_uuid, next_2_endpoint_uuid = next_2_endpoint[0:2] + #i+3 + next_3_endpoint = endpoints[i+3] + next_3_device_uuid, next_3_endpoint_uuid = next_3_endpoint[0:2] + log.info(f"dev {device_uuid} ") + log.info(f"dev2 {next_2_device_uuid} dev3 {next_3_device_uuid} ") + if next_2_device_uuid == next_3_device_uuid and next_3_device_uuid == device_uuid: + entry_tuple = next_2_endpoint_uuid, next_3_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 4 + else: + log.info(f"error : next_2_dev {next_2_device_uuid} next_3_device{next_3_device_uuid} dev {device_uuid} for i {i} ") + return {} + else: + while(i < end-1): + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] - return {} + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 2 + else: + return {} + #rx tp + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + entry_tuple = endpoint_uuid, "0", + entries[device_uuid].append(entry_tuple) else: + entries = {} + if len(endpoints) != 4: + log.info(f"PDP : expected alien configuration with 4 endpoints ") + return {} + i = 0 + device0 , endpoint0 = endpoints[0][0:2] + device1 , endpoint1 = endpoints[1][0:2] + device2 , endpoint2 = endpoints[2][0:2] + device3 , endpoint3 = endpoints[3][0:2] + + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + + if device0 == device1: + if device0 not in entries.keys(): + entries[device0] = [] + entry_tuple = endpoint0, 
endpoint1 + entries[device0].append(entry_tuple) + + if device2 == device3: + if device2 not in entries.keys(): + entries[device2] = [] + entry_tuple = endpoint2, endpoint3 + entries[device2].append(entry_tuple) + + #if bidir reading 4 endpoints per node + ''' + if bidir: + log.info(f"i starts with {i} ") + device0 , endpoint0=endpoints[0][0:2] + device1 , endpoint1=endpoints[1][0:2] + finalend=end-2 + if device0 ==device1: + i = i + 1 + else : + finalend=end-1 + while(i < finalend): + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + else: + log.info(f"error : next_dev {next_device_uuid} dev {device_uuid} for i {i} ") + return {} + #i+2 + + next_2_endpoint = endpoints[i+2] + next_2_device_uuid, next_2_endpoint_uuid = next_2_endpoint[0:2] + #i+3 + next_3_endpoint = endpoints[i+3] + next_3_device_uuid, next_3_endpoint_uuid = next_3_endpoint[0:2] + log.info(f"dev {device_uuid} ") + log.info(f"dev2 {next_2_device_uuid} dev3 {next_3_device_uuid} ") + if next_2_device_uuid == next_3_device_uuid and next_3_device_uuid == device_uuid: + entry_tuple = next_2_endpoint_uuid, next_3_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 4 + else: + log.info(f"error : next_2_dev {next_2_device_uuid} next_3_device{next_3_device_uuid} dev {device_uuid} for i {i} ") + return {} + else: while(i < end-1): - #i - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] + #i + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - #i+1 - next_endpoint = endpoints[i+1] - next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] - if next_device_uuid == 
device_uuid: - entry_tuple = endpoint_uuid, next_endpoint_uuid - entries[device_uuid].append(entry_tuple) - i = i + 2 - else: - return {} - #rx tp - endpoint = endpoints[i] - device_uuid, endpoint_uuid = endpoint[0:2] - if device_uuid not in entries.keys(): - entries[device_uuid] = [] - entry_tuple = endpoint_uuid, "0", - entries[device_uuid].append(entry_tuple) + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + #i+1 + next_endpoint = endpoints[i+1] + next_device_uuid, next_endpoint_uuid = next_endpoint[0:2] + if next_device_uuid == device_uuid: + entry_tuple = endpoint_uuid, next_endpoint_uuid + entries[device_uuid].append(entry_tuple) + i = i + 2 + else: + return {} + #rx tp + endpoint = endpoints[i] + device_uuid, endpoint_uuid = endpoint[0:2] + if device_uuid not in entries.keys(): + entries[device_uuid] = [] + entry_tuple = endpoint_uuid, "0", + entries[device_uuid].append(entry_tuple) + ''' return entries -def endpoints_to_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int, is_ob: bool)->Dict: +def endpoints_to_flows(endpoints : List[Tuple[str, str, Optional[str]]], bidir : int, is_ob: bool, op_mode: int)->Dict: if is_ob: entries = ob_flows(endpoints, bidir) else: - entries = conn_flows(endpoints, bidir) + entries = conn_flows(endpoints, bidir, op_mode) return entries diff --git a/src/service/service/task_scheduler/TaskExecutor.py b/src/service/service/task_scheduler/TaskExecutor.py index e2709d9bca485db317d6469f183621e74dcc8ae5..a46be6414396bc8b997a93d01926d49fb6707bb7 100644 --- a/src/service/service/task_scheduler/TaskExecutor.py +++ b/src/service/service/task_scheduler/TaskExecutor.py @@ -148,6 +148,7 @@ class TaskExecutor: self, device : Device, settings : str, flows : list, is_opticalband : bool, connection_uuid:str ): + LOGGER.info(f"service optical config {settings}") device_key = get_device_key(device.device_id) optical_config_id = OpticalConfigId() optical_config_id.opticalconfig_uuid = 
opticalconfig_get_uuid(device.device_id) diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index eb61948962aa77c0d4e946146a76d5efcbb11ae5..a9a1a20e7a99c4850bcb4a828e2869f363c7a0ff 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -34,8 +34,9 @@ from .tasks.Task_ServiceDelete import Task_ServiceDelete from .tasks.Task_ServiceSetStatus import Task_ServiceSetStatus from .TaskExecutor import CacheableObjectType, TaskExecutor from .tasks.Task_OpticalServiceConfigDelete import Task_OpticalServiceConfigDelete -from service.service.tools.OpticalTools import delete_lightpath ,DelFlexLightpath +from service.service.tools.OpticalTools import DelFlexLightpath, delete_lightpath from common.Constants import OpticalServiceType + if TYPE_CHECKING: from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory @@ -176,6 +177,30 @@ class TasksScheduler: return optical_connection_configure_key + def _optical_connection_configure_simple(self, connection_id : ConnectionId + , service_id : ServiceId , + has_media_channel : bool, has_optical_band = True) -> str: + optical_connection_configure_key = self._add_task_if_not_exists(Task_OpticalConnectionConfigure( + self._executor, connection_id)) + + ''' + # the connection configuration depends on its connection's service being in planning state + service_planned_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_PLANNED)) + self._dag.add(optical_connection_configure_key, service_planned_key) + ''' + + + # the connection's service depends on the connection configuration to transition to active state + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE)) + self._dag.add(service_active_key, 
optical_connection_configure_key) + + + return optical_connection_configure_key + + + def _optical_connection_deconfigure( self, connection_id : ConnectionId, service_id : ServiceId, has_media_channel : bool, has_optical_band = True @@ -196,6 +221,30 @@ class TasksScheduler: self._dag.add(service_delete_key, connection_deconfigure_key) return connection_deconfigure_key + + def _optical_connection_deconfigure_simple( + self, connection_id : ConnectionId, service_id : ServiceId, + has_media_channel : bool, has_optical_band = True + ) -> str: + connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + self._executor, connection_id, has_media_channel=has_media_channel + )) + ''' + # the connection deconfiguration depends on its connection's service being in removing state + service_pending_removal_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE + )) + self._dag.add(connection_deconfigure_key, service_pending_removal_key) + + + service_delete_key = self._add_task_if_not_exists(Task_OpticalServiceDelete( + self._executor, service_id, has_media_channel, has_optical_band + )) + self._dag.add(service_delete_key, connection_deconfigure_key) + ''' + + return connection_deconfigure_key + def _optical_service_config_remove( self, connection_id : ConnectionId, service_id : ServiceId @@ -258,9 +307,8 @@ class TasksScheduler: else : has_optical_band = True return (has_media_channel, has_optical_band) - - - + + def compose_from_opticalcontroller_reply( self, pathcomp_reply : PathCompReply, is_delete : bool = False ) -> None: @@ -273,50 +321,48 @@ class TasksScheduler: has_optical_band = None for service in pathcomp_reply.services: + connections = self._context_client.ListConnections(service.service_id) + has_media_channel, has_optical_band = self.check_service_for_media_channel( + connections=connections, item=service.service_id + ) + + include_service( + 
service.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) + self._add_service_to_executor_cache(service) - connections = self._context_client.ListConnections(service.service_id) - has_media_channel, has_optical_band = self.check_service_for_media_channel( - connections=connections, item=service.service_id - ) - - - include_service(service.service_id , has_media_channel=has_media_channel, has_optical_band=has_optical_band) - self._add_service_to_executor_cache(service) - - for connection in connections.connections: - self._add_connection_to_executor_cache(connection) - - + for connection in connections.connections: + self._add_connection_to_executor_cache(connection) for connection in pathcomp_reply.connections: - - connection_key = include_connection( - connection.connection_id, connection.service_id, has_media_channel=has_media_channel, - has_optical_band=has_optical_band - ) - self._add_connection_to_executor_cache(connection) + connection_key = include_connection( + connection.connection_id, connection.service_id, + has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) + self._add_connection_to_executor_cache(connection) - self._executor.get_service(connection.service_id) - for sub_service_id in connection.sub_service_ids: - _,service_key_done = include_service( - sub_service_id, has_media_channel=has_media_channel, - has_optical_band=has_optical_band - ) - self._executor.get_service(sub_service_id) - self._dag.add(connection_key, service_key_done) + self._executor.get_service(connection.service_id) + for sub_service_id in connection.sub_service_ids: + _,service_key_done = include_service( + sub_service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) + self._executor.get_service(sub_service_id) + self._dag.add(connection_key, service_key_done) t1 = time.time() LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) - - - + + def 
compose_from_service_expansion( self, service :Service, ) -> None: t0 = time.time() include_service = self._optical_service_create include_connection = self._optical_connection_configure - + logging.debug(f"after setting the config {service}") #pending_items_to_explore.put(service) has_media_channel = None @@ -324,17 +370,16 @@ class TasksScheduler: if service is None : raise NotFoundException('Service', service, extra_details=[ 'service not found ' ]) - - - + connections = self._context_client.ListConnections(service.service_id) has_media_channel, has_optical_band = self.check_service_for_media_channel( connections=connections, item=service.service_id ) - - _,service_key_done= include_service(service.service_id , - has_media_channel=has_media_channel, - has_optical_band=has_optical_band) + + _,service_key_done= include_service( + service.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) # self._add_service_to_executor_cache(service) service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus( self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING @@ -342,7 +387,8 @@ class TasksScheduler: self._add_service_to_executor_cache(service) for connection in connections.connections: connection_key = include_connection( - connection.connection_id, connection.service_id, has_media_channel=has_media_channel, + connection.connection_id, connection.service_id, + has_media_channel=has_media_channel, has_optical_band=has_optical_band ) self._add_connection_to_executor_cache(connection) @@ -350,7 +396,9 @@ class TasksScheduler: t1 = time.time() LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) - def compose_from_optical_service(self, service : Service, params:dict, is_delete : bool = False) -> None: + def compose_from_optical_service( + self, service : Service, params:dict, is_delete : bool = False + ) -> None: t0 = time.time() include_service = self._optical_service_remove if 
is_delete else self._service_create include_connection = self._optical_connection_deconfigure if is_delete else self._connection_configure @@ -359,130 +407,118 @@ class TasksScheduler: explored_items = set() pending_items_to_explore = queue.Queue() pending_items_to_explore.put(service) - has_media_channel=None - has_optical_band=None - reply=None - code=0 - reply_not_allowed="DELETE_NOT_ALLOWED" + has_media_channel = None + has_optical_band = None + reply = None + code = 0 + reply_not_allowed = "DELETE_NOT_ALLOWED" while not pending_items_to_explore.empty(): try: item = pending_items_to_explore.get(block=False) - except queue.Empty: break - + if isinstance(item, Service): - str_item_key = grpc_message_to_json_string(item.service_id) if str_item_key in explored_items: continue connections = self._context_client.ListConnections(item.service_id) - has_media_channel,has_optical_band=self.check_service_for_media_channel(connections=connections,item=item.service_id) - oc_type = 1 + has_media_channel, has_optical_band = self.check_service_for_media_channel( + connections=connections, item=item.service_id + ) + oc_type = 1 if len(service.service_config.config_rules) > 0: for constraint in service.service_constraints: if "type" in constraint.custom.constraint_type: - oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) - if oc_type == 2 : - reply,code = delete_lightpath( - params['src'] - , params ['dst'] - , params['bitrate'] - , flow_id= params['flow_id'] - ) - - else : - reply,code = DelFlexLightpath( - params['src'] - , params ['dst'] - , params['bitrate'] - , params['ob_id'] - , delete_band=not has_media_channel - , flow_id= params['flow_id'] - ) - - + oc_type = OpticalServiceType(str(constraint.custom.constraint_value)) + if oc_type == 2: + reply, code = delete_lightpath( + params['src'], params ['dst'], params['bitrate'], + flow_id= params['flow_id'] + ) + else: + reply, code = DelFlexLightpath( + params['src'], params ['dst'], params['bitrate'], + 
params['ob_id'], flow_id=params['flow_id'] + ) if code == 400 and reply_not_allowed in reply : MSG = 'Deleteion for the service is not Allowed , Served Lightpaths is not empty' raise Exception(MSG) - include_service(item.service_id,has_media_channel=has_media_channel,has_optical_band=has_optical_band) + include_service( + item.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) self._add_service_to_executor_cache(item) - - + for connection in connections.connections: - self._add_connection_to_executor_cache(connection) - pending_items_to_explore.put(connection) + self._add_connection_to_executor_cache(connection) + pending_items_to_explore.put(connection) explored_items.add(str_item_key) - elif isinstance(item, ServiceId): - - if code == 400 and reply_not_allowed in reply:break - + if code == 400 and reply_not_allowed in reply: break + str_item_key = grpc_message_to_json_string(item) if str_item_key in explored_items: continue connections = self._context_client.ListConnections(item) - has_media_channel,has_optical_band=self.check_service_for_media_channel(connections=connections,item=item) + has_media_channel, has_optical_band = self.check_service_for_media_channel( + connections=connections, item=item + ) + + include_service( + item, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) - - include_service(item,has_media_channel=has_media_channel,has_optical_band=has_optical_band) - - self._executor.get_service(item) - + for connection in connections.connections: - self._add_connection_to_executor_cache(connection) pending_items_to_explore.put(connection) - + explored_items.add(str_item_key) elif isinstance(item, Connection): if code == 400 and reply_not_allowed in reply:break str_item_key = grpc_message_to_json_string(item.connection_id) if str_item_key in explored_items: continue - - - connection_key = include_connection( item.connection_id - , item.service_id - , has_media_channel=has_media_channel - , 
has_optical_band=has_optical_band ) + + connection_key = include_connection( + item.connection_id, item.service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) self._add_connection_to_executor_cache(connection) - + if include_service_config is not None : - connections_list = ConnectionList() - connections_list.connections.append(item) - - is_media_channel,_=self.check_service_for_media_channel(connections=connections_list,item=service) - - if has_optical_band and is_media_channel: - include_service_config(item.connection_id - , item.service_id ) - + connections_list = ConnectionList() + connections_list.connections.append(item) + + is_media_channel,_=self.check_service_for_media_channel( + connections=connections_list,item=service + ) + + if has_optical_band and is_media_channel: + include_service_config(item.connection_id, item.service_id) self._executor.get_service(item.service_id) pending_items_to_explore.put(item.service_id) - - + for sub_service_id in item.sub_service_ids: - _,service_key_done = include_service(sub_service_id - ,has_media_channel=has_media_channel - ,has_optical_band=has_optical_band) + _,service_key_done = include_service( + sub_service_id, has_media_channel=has_media_channel, + has_optical_band=has_optical_band + ) self._executor.get_service(sub_service_id) self._dag.add(service_key_done, connection_key) pending_items_to_explore.put(sub_service_id) - - explored_items.add(str_item_key) - - else: MSG = 'Unsupported item {:s}({:s})' raise Exception(MSG.format(type(item).__name__, grpc_message_to_json_string(item))) - + t1 = time.time() - LOGGER.debug('[compose_from_service] elapsed_time: {:f} sec'.format(t1-t0)) + LOGGER.debug('[compose_from_optical_service] elapsed_time: {:f} sec'.format(t1-t0)) def compose_from_service(self, service : Service, is_delete : bool = False) -> None: @@ -548,6 +584,100 @@ class TasksScheduler: t1 = time.time() LOGGER.debug('[compose_from_service] elapsed_time: {:f} 
sec'.format(t1-t0)) + + def compose_optical_service_update( + self, service : Service, old_connection : Connection, new_connection : Connection + ) -> None: + t0 = time.time() + + self._add_service_to_executor_cache(service) + self._add_connection_to_executor_cache(old_connection) + self._add_connection_to_executor_cache(new_connection) + + service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING + )) + + #old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + # self._executor, old_connection.connection_id, old_connection.service_id + #)) + + old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + self._executor, old_connection.connection_id, True + )) + + + new_connection_configure_key = self._add_task_if_not_exists(Task_OpticalConnectionConfigure( + self._executor, new_connection.connection_id + )) + + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE + )) + + # the old connection deconfiguration depends on service being in updating state + self._dag.add(old_connection_deconfigure_key, service_updating_key) + + # the new connection configuration depends on service being in updating state + self._dag.add(new_connection_configure_key, service_updating_key) + + # the new connection configuration depends on the old connection having been deconfigured + self._dag.add(new_connection_configure_key, old_connection_deconfigure_key) + + # re-activating the service depends on the service being in updating state before + self._dag.add(service_active_key, service_updating_key) + + # re-activating the service depends on the new connection having been configured + self._dag.add(service_active_key, new_connection_configure_key) + + t1 = time.time() + LOGGER.debug('[compose_optical_service_update] 
elapsed_time: {:f} sec'.format(t1-t0)) + + + def compose_optical_service_update1( + self, service : Service, old_connection : Connection, new_connection : Connection + ) -> None: + t0 = time.time() + + self._add_service_to_executor_cache(service) + #self._add_connection_to_executor_cache(old_connection) + self._add_connection_to_executor_cache(new_connection) + + service_updating_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_UPDATING + )) + + #old_connection_deconfigure_key = self._add_task_if_not_exists(Task_OpticalConnectionDeconfigure( + # self._executor, old_connection.connection_id, old_connection.service_id + #)) + + new_connection_configure_key = self._add_task_if_not_exists(Task_OpticalConnectionConfigure( + self._executor, new_connection.connection_id + )) + + service_active_key = self._add_task_if_not_exists(Task_ServiceSetStatus( + self._executor, service.service_id, ServiceStatusEnum.SERVICESTATUS_ACTIVE + )) + + # the old connection deconfiguration depends on service being in updating state + #self._dag.add(old_connection_deconfigure_key, service_updating_key) + + # the new connection configuration depends on service being in updating state + self._dag.add(new_connection_configure_key, service_updating_key) + + # the new connection configuration depends on the old connection having been deconfigured + #self._dag.add(new_connection_configure_key, old_connection_deconfigure_key) + + # re-activating the service depends on the service being in updating state before + self._dag.add(service_active_key, service_updating_key) + + # re-activating the service depends on the new connection having been configured + self._dag.add(service_active_key, new_connection_configure_key) + + t1 = time.time() + LOGGER.debug('[compose_optical_service_update1] elapsed_time: {:f} sec'.format(t1-t0)) + + def compose_service_connection_update( self, service : Service, old_connection : Connection, 
new_connection : Connection ) -> None: diff --git a/src/service/service/tools/OpticalTools.py b/src/service/service/tools/OpticalTools.py index 8ec7dcee8bb8fd0b4fa17d8a2c59fc13439e0b50..99261647e0d8dbb26295b7c74a96ff9868ccebac 100644 --- a/src/service/service/tools/OpticalTools.py +++ b/src/service/service/tools/OpticalTools.py @@ -120,19 +120,43 @@ def refresh_opticalcontroller(topology_id : dict): log.debug(f"GetTopology Response {res}") -def add_flex_lightpath(src, dst, bitrate, bidir, ob_band) -> str: +def reconfig_flex_lightpath(flow_id) -> str: if not TESTING: urlx = "" headers = {"Content-Type": "application/json"} base_url = get_optical_controller_base_url() + urlx = "{:s}/ReconfigFlexLightpath/{}".format(base_url, flow_id) + r = requests.put(urlx, headers=headers) + print(f"reconfig {r}") + reply = r.text + return reply + else: + if bidir is not None: + if bidir == 0: + return reply_uni_txt + return reply_bid_txt + + +def add_flex_lightpath(src, dst, bitrate, bidir, pref, ob_band, dj_optical_band_id) -> str: + if not TESTING: + urlx = "" + headers = {"Content-Type": "application/json"} + base_url = get_optical_controller_base_url() + prefs = "ANY" + if pref != None: + prefs = pref + if ob_band is None: if bidir is None: bidir = 1 - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir)) + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir)) else: if bidir is None: bidir = 1 - urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(bidir), str(ob_band)) + if dj_optical_band_id is None: + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir), str(ob_band)) + else: + urlx = "{:s}/AddFlexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, dst, str(bitrate), str(prefs), str(bidir), str(ob_band), 
str(dj_optical_band_id)) r = requests.put(urlx, headers=headers) print(f"addpathlight {r}") reply = r.text @@ -143,6 +167,19 @@ def add_flex_lightpath(src, dst, bitrate, bidir, ob_band) -> str: return reply_uni_txt return reply_bid_txt +def add_alien_flex_lightpath(src, s_port, dst, d_port, band, ob_id, bidir=None) -> str: + urlx = "" + headers = {"Content-Type": "application/json"} + base_url = get_optical_controller_base_url() + #/AddAlienFLexLightpath////// + if bidir is None: + urlx = "{:s}/AddAlienFLexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, s_port, dst, d_port, str(band), str(ob_id)) + else: + urlx = "{:s}/AddAlienFLexLightpath/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}/{:s}".format(base_url, src, s_port, dst, d_port, str(band), str(ob_id), str(bidir)) + r = requests.put(urlx, headers=headers) + reply = r.text + return reply + def add_lightpath(src, dst, bitrate, bidir) -> str: if not TESTING: urlx = "" @@ -177,14 +214,14 @@ def get_optical_band(idx) -> str: return optical_band_uni_txt -def DelFlexLightpath( src, dst, bitrate, ob_id, delete_band, flow_id=None) -> str: - reply = "200" - delete_band = 1 if delete_band else 0 +def DelFlexLightpath( src, dst, bitrate, ob_id, flow_id=None) -> str: + reply = {} + code = 200 base_url = get_optical_controller_base_url() if not TESTING: if flow_id is not None: if ob_id is not None : - urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}/{}".format(base_url, src, dst, bitrate, flow_id,ob_id) + urlx = "{:s}/DelFlexLightpath/{}/{}/{}/{}".format(base_url, src, dst, flow_id, ob_id) else : #urlx = "http://{}:{}/OpticalTFS/DelOpticalBand/{}/{}/{}".format(OPTICAL_IP, OPTICAL_PORT, src, dst, ob_id) @@ -216,6 +253,135 @@ def get_lightpaths() -> str: reply = r.text return reply +def adapt_reply_ob(devices, service, reply_json, context_id, topology_id, optical_band_txt) -> PathCompReply: + opt_reply = PathCompReply() + topo = TopologyId( + context_id=ContextId(context_uuid=Uuid(uuid=context_id)), + 
topology_uuid=Uuid(uuid=topology_id) + ) + #add optical band connection first + rules_ob= [] + ob_id = 0 + connection_ob=None + + r = reply_json + if "optical_band_id" in r.keys(): + ob_id = r["optical_band_id"] + if "bidir" in r.keys(): + bidir_f = r["bidir"] + else: + bidir_f = False + if optical_band_txt != "": + ob_json = json.loads(optical_band_txt) + ob = ob_json + connection_ob = add_connection_to_reply(opt_reply) + uuuid_x = str(uuid.uuid4()) + connection_ob.connection_id.connection_uuid.uuid = uuuid_x + connection_ob.service_id.CopyFrom(service.service_id) + obt = ob["band_type"] + if obt == "l_slots": + band_type = "L_BAND" + elif obt == "s_slots": + band_type = "S_BAND" + else: + band_type = "C_BAND" + + freq = ob["freq"] + bx = ob["band"] + #+1 is added to avoid overlap in the WSS of MGONs + lf = int(int(freq)-int(bx/2))+1 + uf = int(int(freq)+int(bx/2)) + val_ob = { + "band_type" : band_type, + "low-freq" : lf, + "up-freq" : uf, + "frequency" : freq, + "band" : bx, + "ob_id" : ob_id, + "bidir" : bidir_f + } + rules_ob.append(ConfigRule_Custom(resource_key="/settings-ob_{}".format(uuuid_x), resource_value=json.dumps(val_ob))) + bidir_ob = ob["bidir"] + # in case the service is built upon existed optical band , don't clacluate the endpoints of it + for devxb in ob["flows"].keys(): + log.debug("optical-band device {}".format(devxb)) + in_end_point_b = "0" + out_end_point_b = "0" + in_end_point_f = ob["flows"][devxb]["f"]["in"] + out_end_point_f = ob["flows"][devxb]["f"]["out"] + log.debug("optical-band ports {}, {}".format(in_end_point_f, out_end_point_f)) + if bidir_ob: + in_end_point_b = ob["flows"][devxb]["b"]["in"] + out_end_point_b = ob["flows"][devxb]["b"]["out"] + log.debug("optical-band ports {}, {}".format(in_end_point_b, out_end_point_b)) + #if (in_end_point_f == "0" or out_end_point_f == "0") and (in_end_point_b == "0" or out_end_point_b == "0"): + if in_end_point_f != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_f) + 
if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, in_end_point_f)) + + if out_end_point_f != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_f) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, out_end_point_f)) + if in_end_point_b != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, in_end_point_b) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, in_end_point_b)) + if out_end_point_b != "0": + d_ob, p_ob = get_uuids_from_names(devices, devxb, out_end_point_b) + if d_ob != "" and p_ob != "": + end_point_b = EndPointId(topology_id=topo, device_id=DeviceId(device_uuid=Uuid(uuid=d_ob)), endpoint_uuid=Uuid(uuid=p_ob)) + connection_ob.path_hops_endpoint_ids.add().CopyFrom(end_point_b) + else: + log.info("no map device port for device {} port {}".format(devxb, out_end_point_b)) + log.debug("optical-band connection {}".format(connection_ob)) + #check that list of endpoints is not empty + if connection_ob is not None and len(connection_ob.path_hops_endpoint_ids) == 0: + log.debug("deleting empty optical-band connection") + opt_reply.connections.remove(connection_ob) + + ''' + #inizialize custom optical parameters + band = r["band"] if "band" in r else None + op_mode = r["op-mode"] if "op-mode" in r else None 
+ frequency = r["freq"] if "freq" in r else None + flow_id = r["flow_id"] if "flow_id" in r else None + r_type = r["band_type"] if "band_type" in r else None + if r_type == "l_slots": + band_type = "L_BAND" + elif r_type == "s_slots": + band_type = "S_BAND" + else: + band_type = "C_BAND" + if ob_id != 0: + val = {"target-output-power": "1.0", "frequency": frequency, "operational-mode": op_mode, "band": band, "flow_id": flow_id, "ob_id": ob_id, "band_type": band_type, "bidir": bidir_f} + else: + val = {"target-output-power": "1.0", "frequency": frequency, "operational-mode": op_mode, "band": band, "flow_id": flow_id, "band_type": band_type, "bidir": bidir_f} + custom_rule = ConfigRule_Custom(resource_key="/settings", resource_value=json.dumps(val)) + rule = ConfigRule(action=ConfigActionEnum.CONFIGACTION_SET, custom=custom_rule) + service.service_config.config_rules.add().CopyFrom(rule) + ''' + + if len(rules_ob) > 0: + for rulex in rules_ob: + rule_ob = ConfigRule(action=ConfigActionEnum.CONFIGACTION_SET, custom=rulex) + service.service_config.config_rules.add().CopyFrom(rule_ob) + + opt_reply.services.add().CopyFrom(service) + return opt_reply + + + def adapt_reply(devices, service, reply_json, context_id, topology_id, optical_band_txt) -> PathCompReply: opt_reply = PathCompReply() topo = TopologyId( diff --git a/src/service/tests/test_recon.py b/src/service/tests/test_recon.py new file mode 100644 index 0000000000000000000000000000000000000000..b049fc93b87c6d545828722ceea1516a2eb01f9a --- /dev/null +++ b/src/service/tests/test_recon.py @@ -0,0 +1,101 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, Service +#from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = 'src/service/tests/descriptors_recompute_conns.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + _client.close() + + +def test_service_recompute_connection( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +) -> None: + + # ===== Setup scenario ============================================================================================= + #validate_empty_scenario(context_client) + + # Load 
descriptors and validate the base scenario + #descriptor_loader = DescriptorLoader( + # descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client, + # service_client=service_client) + #results = descriptor_loader.process() + #check_descriptor_load_results(results, descriptor_loader) + #descriptor_loader.validate() + + + # ===== Recompute Connection ======================================================================================= + response = context_client.ListServices(ADMIN_CONTEXT_ID) + print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + #assert len(response.services) == 1 + for service in response.services: + #service = response.services[0] + service_id = service.service_id + name = service.name + print(name) + + if name == "optical-band-C1": + response = context_client.ListConnections(service_id) + print("AAAAAAAAA") + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + #assert len(response.connections) == 1 # 1 connection per service + #str_old_connections = grpc_message_to_json_string(response) + + # Change path first time + request = Service() + request.CopyFrom(service) + del request.service_endpoint_ids[:] # pylint: disable=no-member + del request.service_constraints[:] # pylint: disable=no-member + del request.service_config.config_rules[:] # pylint: disable=no-member + service_client.RecomputeConnections(request) + + response = context_client.ListConnections(service_id) + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + #assert len(response.connections) == 1 # 1 connection per service + #str_new_connections = grpc_message_to_json_string(response) + #print(' new connection => {:s}'.format(str_new_connections)) + + diff --git 
a/src/service/tests/test_recon2.py b/src/service/tests/test_recon2.py new file mode 100644 index 0000000000000000000000000000000000000000..2c9fbed7dcf66235e179e270edc9969c2e8a6021 --- /dev/null +++ b/src/service/tests/test_recon2.py @@ -0,0 +1,101 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging, pytest +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import ContextId, Service +#from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DESCRIPTOR_FILE = 'src/service/tests/descriptors_recompute_conns.json' +ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME)) + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient() + yield _client + _client.close() + +@pytest.fixture(scope='session') +def service_client(): + _client = ServiceClient() + yield _client + 
_client.close() + + +def test_service_recompute_connection( + context_client : ContextClient, # pylint: disable=redefined-outer-name + device_client : DeviceClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient, # pylint: disable=redefined-outer-name +) -> None: + + # ===== Setup scenario ============================================================================================= + #validate_empty_scenario(context_client) + + # Load descriptors and validate the base scenario + #descriptor_loader = DescriptorLoader( + # descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client, + # service_client=service_client) + #results = descriptor_loader.process() + #check_descriptor_load_results(results, descriptor_loader) + #descriptor_loader.validate() + + + # ===== Recompute Connection ======================================================================================= + response = context_client.ListServices(ADMIN_CONTEXT_ID) + print('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response))) + #assert len(response.services) == 1 + for service in response.services: + #service = response.services[0] + service_id = service.service_id + name = service.name + print(name) + + if name == "optical-band-C2": + response = context_client.ListConnections(service_id) + print("AAAAAAAAA") + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + #assert len(response.connections) == 1 # 1 connection per service + #str_old_connections = grpc_message_to_json_string(response) + + # Change path first time + request = Service() + request.CopyFrom(service) + del request.service_endpoint_ids[:] # pylint: disable=no-member + del request.service_constraints[:] # pylint: disable=no-member + del request.service_config.config_rules[:] # pylint: disable=no-member + 
service_client.RecomputeConnections(request) + + response = context_client.ListConnections(service_id) + print(' ServiceId[{:s}] => Connections[{:d}] = {:s}'.format( + grpc_message_to_json_string(service_id), len(response.connections), grpc_message_to_json_string(response))) + #assert len(response.connections) == 1 # 1 connection per service + #str_new_connections = grpc_message_to_json_string(response) + #print(' new connection => {:s}'.format(str_new_connections)) + + diff --git a/src/telemetry/backend/requirements.in b/src/telemetry/backend/requirements.in index 3b1fd8b35de2bdd944923beaec42ce61e0d4ac50..51ea6b90612bc366e67fd74677b09281615947d4 100644 --- a/src/telemetry/backend/requirements.in +++ b/src/telemetry/backend/requirements.in @@ -20,3 +20,4 @@ kafka-python==2.0.6 numpy==2.0.1 pygnmi==0.8.14 pytz>=2025.2 +scapy==2.6.1 # TODO: UBI need to confirm the version (This depencdency was missing) diff --git a/src/telemetry/backend/service/HelperMethods.py b/src/telemetry/backend/service/HelperMethods.py index 2d57917c1e89ede7bc04ad53944066f8016c1209..313dbba86eb5e2742039374aa8957808b20267d2 100644 --- a/src/telemetry/backend/service/HelperMethods.py +++ b/src/telemetry/backend/service/HelperMethods.py @@ -14,6 +14,7 @@ import uuid import logging +from typing import Optional from .collector_api._Collector import _Collector from .collector_api.DriverInstanceCache import get_driver from .collectors.int_collector.INTCollector import INTCollector @@ -21,11 +22,14 @@ from common.proto.kpi_manager_pb2 import KpiId from common.tools.context_queries.Device import get_device from common.tools.context_queries.EndPoint import get_endpoint_names from typing import List, Tuple, Optional +from telemetry.backend.service.collectors.gnmi_oc.KPI import KPI + +from .collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector LOGGER = logging.getLogger(__name__) def get_subscription_parameters( - kpi_id : str, kpi_manager_client, context_client, duration, interval + 
kpi_id : str, kpi_manager_client, context_client, resource, duration, interval ) -> Optional[List[Tuple]]: """ Method to get subscription parameters based on KPI ID. @@ -43,9 +47,9 @@ def get_subscription_parameters( - A KPI Descriptor must be added in KPI DB with correct device_id. - The device must be available in the context. """ - kpi_id_obj = KpiId() + kpi_id_obj = KpiId() kpi_id_obj.kpi_id.uuid = kpi_id # pyright: ignore[reportAttributeAccessIssue] - kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) + kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) if not kpi_descriptor: LOGGER.warning(f"KPI ID: {kpi_id} - Descriptor not found. Skipping...") return None @@ -59,7 +63,7 @@ def get_subscription_parameters( include_components = False ) if not device: - raise Exception(f"KPI ID: {kpi_id} - Device not found for KPI descriptor.") + raise Exception(f"KPI ID: {kpi_id} - Device not found for KPI descriptor.") #TODO: Change to TFS NotFoundException endpoints = device.device_endpoints LOGGER.debug(f"Device for KPI ID: {kpi_id} - {endpoints}") @@ -86,7 +90,7 @@ def get_subscription_parameters( { "kpi" : kpi_sample_type, # As request is based on the single KPI so it should have only one endpoint "endpoint" : endpoint_data[endpoint.endpoint_uuid.uuid][0], # Endpoint name - "resource" : 'interface', # Example resource type + "resource" : resource, # Example resource type is 'interface' or 'wavelength-router' for MG-ON, this should be defined in the KPI Descriptor or as part of the request }, float(duration), float(interval), @@ -100,14 +104,13 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri Method to get a collector instance based on KPI ID. Preconditions: - A KPI Descriptor must be added in KPI DB with correct device_id. - - The device must be available in the context. + - The device must be available in the context DB. Returns: - - Collector instance if found, otherwise None. 
- Raises: - - Exception if the KPI ID is not found or the collector cannot be created. + - Collector instance if found, otherwise raises exception + if the KPI ID is not found or the collector cannot be created. """ LOGGER.info(f"Getting collector for KPI ID: {kpi_id}") - kpi_id_obj = KpiId() + kpi_id_obj = KpiId() kpi_id_obj.kpi_id.uuid = kpi_id # pyright: ignore[reportAttributeAccessIssue] kpi_descriptor = kpi_manager_client.GetKpiDescriptor(kpi_id_obj) if not kpi_descriptor: @@ -122,9 +125,10 @@ def get_collector_by_kpi_id(kpi_id: str, kpi_manager_client, context_client, dri ) # Getting device collector (testing) - collector : _Collector = get_driver(driver_instance_cache, device) + collector : _Collector = get_driver(driver_instance_cache, device) # NOTE: driver_instance_cache is define in collector_api.DriverInstanceCache if collector is None: - raise Exception(f"KPI ID: {kpi_id} - Collector not found for device {device.device_uuid.uuid}.") + raise Exception(f"KPI ID: {kpi_id} - Collector not found for device {device.device_uuid.uuid}.") #TODO: Change to TFS NotFoundException + # LOGGER.info(f"Collector for KPI ID: {kpi_id} - {collector.__class__.__name__}") return collector def get_node_level_int_collector(collector_id: str, kpi_id: str, address: str, interface: str, port: int, @@ -173,3 +177,40 @@ def get_node_level_int_collector(collector_id: str, kpi_id: str, address: str, i LOGGER.exception(f"Failed to connect INT Collector on node {address}, {interface}:{port}") return collector if connected else None + + +def get_mgon_subscription_parameters(resource: str, endpoint: str, kpi: str, duration: int, interval: int) -> Optional[List[Tuple]]: + return [( + str(uuid.uuid4()), # "x123", + { + "kpi" : kpi, # sub_parameters['kpi'], + "endpoint" : endpoint, # sub_parameters['endpoint'], + "resource" : resource, #sub_parameters['resource'], + }, + duration, + interval, + ),] + + +def get_mgon_collector( + address: str, port: int, username: Optional[str], password: 
Optional[str], insecure: Optional[bool], + skip_verify: Optional[bool] + ) -> Optional[_Collector]: + + _collector = GNMIOpenConfigCollector( + address = address, + port = port, + username = username, + password = password, + insecure = insecure, + skip_verify = skip_verify, + ) + try: + connected = _collector.Connect() + if not connected: + LOGGER.error(f"Failed to connect to MG-ON collector at {address}:{port}") + return None + return _collector + except Exception as ex: + LOGGER.exception(f"Exception while connecting to MG-ON collector at {address}:{port}") + return None diff --git a/src/telemetry/backend/service/TelemetryBackendService.py b/src/telemetry/backend/service/TelemetryBackendService.py index d4e99f300b2450259e7f64c2745a96d88513e57b..a7cd05e45c2eb8e6d26988d8535994490d149500 100755 --- a/src/telemetry/backend/service/TelemetryBackendService.py +++ b/src/telemetry/backend/service/TelemetryBackendService.py @@ -17,7 +17,7 @@ import time import logging import threading -from .HelperMethods import get_collector_by_kpi_id, get_subscription_parameters, get_node_level_int_collector +from .HelperMethods import get_collector_by_kpi_id, get_subscription_parameters, get_node_level_int_collector, get_mgon_subscription_parameters, get_mgon_collector from common.Constants import ServiceNameEnum from common.Settings import get_service_port_grpc from confluent_kafka import Consumer as KafkaConsumer @@ -82,7 +82,7 @@ class TelemetryBackendService(GenericGrpcService): LOGGER.error(f"Consumer error: {receive_msg.error()}") break try: - collector = json.loads(receive_msg.value().decode('utf-8')) + collector = json.loads(receive_msg.value().decode('utf-8')) collector_id = receive_msg.key().decode('utf-8') LOGGER.debug(f"Received Collector: {collector_id} - {collector}") @@ -97,6 +97,10 @@ class TelemetryBackendService(GenericGrpcService): threading.Thread(target=self.GenericCollectorHandler, args=( collector_id, + collector, # TODO: later all other collector[''] should be 
removed. + # For now, to avoid multiple changes in the code, + # I am passing the whole collector dict and accessing the required parameters in the GenericCollectorHandler method. + # This will be changed after confirming the current implementation is working fine. collector['kpi_id'], duration, collector['interval'], @@ -126,11 +130,13 @@ class TelemetryBackendService(GenericGrpcService): LOGGER.warning( f"Unable to consume message from topic: {KafkaTopic.TELEMETRY_REQUEST.value}. ERROR: {e}") - def GenericCollectorHandler(self, collector_id, kpi_id, duration, interval, interface, port, service_id, context_id, stop_event): + def GenericCollectorHandler(self, + collector_id, collector, kpi_id, duration, interval, interface, port, + service_id, context_id, stop_event): """ Method to handle collector request. """ - + LOGGER.info(f"Starting Collector Handler for Collector ID: {collector_id} - KPI ID: {kpi_id}") # INT collector invocation if interface: self.device_collector = get_node_level_int_collector( @@ -144,6 +150,15 @@ class TelemetryBackendService(GenericGrpcService): ) return # Rest of the collectors + elif context_id == "43813baf-195e-5da6-af20-b3d0922e71a7": + self.device_collector = get_mgon_collector( + address = collector['host'], # "172.17.254.24", + port = collector['port'], # 50061, + username = collector['username'], # "admin", + password = collector['password'], # "admin", + insecure = collector.get('insecure', True), + skip_verify = collector.get('skip_verify', True) + ) else: self.device_collector = get_collector_by_kpi_id( kpi_id, self.kpi_manager_client, self.context_client, self.driver_instance_cache) @@ -153,9 +168,15 @@ class TelemetryBackendService(GenericGrpcService): raise Exception(f"KPI ID: {kpi_id} - Collector not found.") # CONFIRM: The method (get_subscription_parameters) is working correctly. 
testcase in telemetry backend tests - resource_to_subscribe = get_subscription_parameters( - kpi_id, self.kpi_manager_client, self.context_client, duration, interval - ) + # resource_to_subscribe = get_subscription_parameters( + # kpi_id, self.kpi_manager_client, self.context_client, resource, duration, interval + # ) + # TODO: Remove after confirming get_subscription_parameters generic is working correctly + resource_to_subscribe = get_mgon_subscription_parameters( + collector['resource'], collector['endpoint'], collector['kpi'], + collector['duration'], collector['sample_interval'] + ) + if not resource_to_subscribe: LOGGER.warning(f"KPI ID: {kpi_id} - Resource to subscribe not found. Skipping...") raise Exception(f"KPI ID: {kpi_id} - Resource to subscribe not found.") @@ -167,14 +188,32 @@ class TelemetryBackendService(GenericGrpcService): raise status else: LOGGER.info(f"Subscription successful for KPI ID: {kpi_id} - Status: {status}") - + + sample_value = None for samples in self.device_collector.GetState(duration=duration, blocking=True): LOGGER.info(f"KPI ID: {kpi_id} - Samples: {samples}") - self.GenerateKpiValue(collector_id, kpi_id, samples) - - # TODO: Stop_event should be managed in this method because there will be no more specific collector - if stop_event.is_set(): - self.device_collector.Disconnect() + if isinstance(samples, dict): + inn_dict = samples.get('update', {}) + LOGGER.info(f"KPI ID: {kpi_id} - Inner Dictionary: {inn_dict}") + list_update = inn_dict.get('update', []) + LOGGER.info(f"KPI ID: {kpi_id} - List Update: {list_update}") + if len(list_update) > 0: + sample_value = list_update[0].get('val') + self.GenerateKpiValue(collector_id, kpi_id, sample_value) + ''' +{ + 'update': { + 'timestamp': 1772103489806507669, + 'update': + [ + {'path': 'openconfig-wavelength-router:wavelength-router/flex-scale-mg-on:optical-bands/optical-band[index=2]/state/optical-power-total-input/instant', 'val': -2.51} + ] + } + } + ''' + # TODO: Stop_event 
should be managed in this method because there will be no more specific collector + if stop_event.is_set(): + self.device_collector.Disconnect() def GenerateKpiValue(self, collector_id: str, kpi_id: str, measured_kpi_value: Any): """ @@ -186,11 +225,12 @@ class TelemetryBackendService(GenericGrpcService): "kpi_id": kpi_id, "kpi_value": measured_kpi_value } + LOGGER.info(f"Producing KPI Value for Collector ID: {collector_id} - KPI ID: {kpi_id} - Value: {kpi_value}") producer.produce( - KafkaTopic.VALUE.value, - key=collector_id, - value=json.dumps(kpi_value), - callback=self.delivery_callback + "topic_value", #KafkaTopic.VALUE.value, + key = collector_id, + value = json.dumps(kpi_value), + callback = self.delivery_callback ) producer.flush() diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py index 31cdce39a0c079bb3c853276421a390d9947e7ab..1b7067b03958dcee66e14ce767525e31488e318b 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/GnmiOpenConfigCollector.py @@ -33,14 +33,15 @@ class GNMIOpenConfigCollector(_Collector): ========================= Lightweight wrapper around *pygnmi* with subscribe / get / unsubscribe helpers. 
""" - def __init__(self, address: str = '', port: int = -1, **setting) -> None: + def __init__(self, address: str = "", port: int = -1, **setting) -> None: super().__init__('gNMI_openconfig_collector', address, port, **setting) self._subscriptions : Dict[str, Subscription] = {} - self.username = setting.get('username', 'admin') - self.password = setting.get('password', 'admin') - self.insecure = setting.get('insecure', True) + self.username = setting.get('username', 'admin') + self.password = setting.get('password', 'admin') + self.insecure = setting.get('insecure', True) + self.skip_verify = setting.get('skip_verify', False) # For TLS certificate verification # self.username = username # self.password = password # self.insecure = insecure @@ -50,19 +51,22 @@ class GNMIOpenConfigCollector(_Collector): self._output_queue = queue.Queue() # Queue for telemetry updates self.logger = logging.getLogger(__name__) - self.logger.debug("GNMICollector instantiated.") + self.logger.info("GNMICollector instantiated.") def Connect(self) -> bool: """ Connect to the gNMI target device. """ + self.logger.info("Connecting to gNMI target %s:%s with username '%s'", + self.address, self.port, self.username) if not self.connected: self.client = gNMIclient( - target=(self.address, self.port), - username=self.username, - password=self.password, - insecure=self.insecure + target = (self.address, self.port), + username = self.username, + password = self.password, + insecure = self.insecure, + skip_verify = self.skip_verify # Skip TLS certificate verification (like gnmi_subscribe_example.py) ) # self.logger.info("Connecting to gNMI target %s:%s with %s and %s", self.address, self.port, self.username, self.password) self.client.connect() # type: ignore @@ -76,12 +80,31 @@ class GNMIOpenConfigCollector(_Collector): def Disconnect(self) -> bool: """ Disconnect from the gNMI target device. + Stops all active subscriptions before closing the connection. 
""" + # Stop all active subscriptions first + if self._subscriptions: + self.logger.info("Stopping %d active subscription(s) before disconnect...", + len(self._subscriptions)) + # Create a list of subscription IDs to avoid dictionary size change during iteration + sub_ids = list(self._subscriptions.keys()) + for sub_id in sub_ids: + try: + self.UnsubscribeState(sub_id) + except Exception as exc: + self.logger.warning("Error stopping subscription %s during disconnect: %s", + sub_id, exc) + if self.connected and self.client: - self.client.close() - self.connected = False - self.logger.info("Disconnected from gNMI target %s:%s", self.address, self.port) - return True + try: + self.client.close() + self.connected = False + self.logger.info("Disconnected from gNMI target %s:%s", self.address, self.port) + return True + except Exception as exc: + self.logger.error("Error during disconnect: %s", exc) + self.connected = False # Mark as disconnected even if close fails + return False else: self.logger.warning("Not connected to any gNMI target.") return True @@ -106,22 +129,21 @@ class GNMIOpenConfigCollector(_Collector): raise KeyError("Endpoint dictionary must contain 'resource' key.") paths = PathMapper.build( - endpoint=sub_endpoint['endpoint'], - kpi=sub_endpoint['kpi'], - resource=sub_endpoint['resource'], + endpoint = sub_endpoint['endpoint'], + kpi = sub_endpoint['kpi' ], + resource = sub_endpoint['resource'], ) - + self.logger.debug("Built %d candidate path(s) for endpoint '%s'", len(paths), paths) self._subscriptions[sub_id] = Subscription( sub_id = sub_id, gnmi_client = self.client, # type: ignore path_list = paths, # <- list of paths metric_queue = self._output_queue, - mode = 'stream', # Default mode + mode = 'sample', # Entry mode: sample/on_change/target_defined sample_interval_ns = int(interval * 1_000_000_000), # Convert seconds to nanoseconds - heartbeat_interval_ns = int(duration * 1_000_000_000), # Convert seconds to nanoseconds - encoding = 'json_ietf', 
# Default encoding + total_duration = duration, + encoding = 'json', # Use 'json' encoding (not 'json_ietf') ) - self.logger.info("Subscribing to %s with job_id %s ...", sub_endpoint, sub_id) response.append(True) except: @@ -130,18 +152,19 @@ class GNMIOpenConfigCollector(_Collector): return response def UnsubscribeState(self, resource_key: str) -> bool: - """Stop the given subscription.""" + """Stop the given subscription gracefully.""" sub = self._subscriptions.pop(resource_key, None) if not sub: - self.logger.error("Attempt to unsubscribe unknown id=%s", resource_key) - # raise KeyError(f"Unknown subscription id '{resource_key}'.") + self.logger.warning("Attempt to unsubscribe unknown id=%s", resource_key) return False - try: sub.stop() - except: - self.logger.exception("Error stopping subscription %s. ", resource_key) + + try: + sub.stop() + self.logger.info("Unsubscribed from state: %s", resource_key) + return True + except Exception as exc: + self.logger.error("Error stopping subscription %s: %s", resource_key, exc) return False - self.logger.info("Unsubscribed from state: %s", resource_key) - return True def GetState(self, duration : float, blocking : bool = True, terminate: Optional[queue.Queue] = None ) -> Iterator[Tuple[float, str, Any]]: diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py index 7281c8a2e015096512f373b84c45310acba59655..0e902fa9c67aceb9c08a5c853af8db2201b582fc 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/KPI.py @@ -16,13 +16,15 @@ from enum import IntEnum, unique @unique -class KPI(IntEnum): # TODO: verify KPI names and codes with KPI proto file. (How many TFS supports) +class KPI(IntEnum): + # TODO: verify KPI names and codes with KPI proto file. 
(How many TFS supports) """Generic KPI codes that map to interface statistics.""" - PACKETS_TRANSMITTED = 101 - PACKETS_RECEIVED = 102 - PACKETS_DROPPED = 103 - BYTES_TRANSMITTED = 201 - BYTES_RECEIVED = 202 - BYTES_DROPPED = 203 - INBAND_POWER = 301 + KPISAMPLETYPE_PACKETS_TRANSMITTED = 101 + KPISAMPLETYPE_PACKETS_RECEIVED = 102 + KPISAMPLETYPE_PACKETS_DROPPED = 103 + KPISAMPLETYPE_BYTES_TRANSMITTED = 201 + KPISAMPLETYPE_BYTES_RECEIVED = 202 + KPISAMPLETYPE_BYTES_DROPPED = 203 + KPISAMPLETYPE_INBAND_POWER = 301 + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 302 # TODO: Add more KPIs as needed, diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py index b02ca55980244d5c715ff577bd8b24166d0545c7..fb1d549525b51b36c7543eb2e7bc479e19c59174 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/PathMapper.py @@ -39,24 +39,24 @@ class PathMapper: # in the future. The list is not exhaustive, but it covers the most common cases # across OpenConfig implementations. The collector will try each until one succeeds. 
# ---- packets --------------------------------------------------- - KPI.PACKETS_TRANSMITTED: [ + KPI.KPISAMPLETYPE_PACKETS_TRANSMITTED: [ "out-pkts", "out-unicast-pkts", "tx-pkts", "packets-output" ], - KPI.PACKETS_RECEIVED: [ + KPI.KPISAMPLETYPE_PACKETS_RECEIVED: [ "in-pkts", "in-unicast-pkts", "rx-pkts", "packets-input" ], - KPI.PACKETS_DROPPED: [ + KPI.KPISAMPLETYPE_PACKETS_DROPPED: [ "in-discards", "out-discards", "packets-drop" ], # ---- bytes ----------------------------------------------------- - KPI.BYTES_TRANSMITTED: [ + KPI.KPISAMPLETYPE_BYTES_TRANSMITTED: [ "out-octets", "tx-octets", "bytes-output" ], - KPI.BYTES_RECEIVED: [ + KPI.KPISAMPLETYPE_BYTES_RECEIVED: [ "in-octets", "rx-octets", "bytes-input" ], - KPI.BYTES_DROPPED: [ + KPI.KPISAMPLETYPE_BYTES_DROPPED: [ "in-octets-discarded", "out-octets-discarded", "bytes-drop" ], @@ -64,9 +64,15 @@ class PathMapper: # Note: Inband power is not a standard leaf in OpenConfig, but # it is included here for completeness. The actual leaf names # may vary by implementation. 
- KPI.INBAND_POWER: [ + KPI.KPISAMPLETYPE_INBAND_POWER: [ "inband-power", "inband-power-state" ], + + # ---- total power (optical wavelength router) ---------------- + # For optical devices using FlexScale MGON YANG model + KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER : [ + "optical-power-total-input/instant", + ], } # --------------------------------------------------------------# @@ -78,6 +84,14 @@ class PathMapper: 'interfaces/interface[name={endpoint}]/state/counters/{leaf}', # 'interfaces/interface[name="{endpoint}"]/state/{leaf}', ] + + # Wavelength router prefixes (for optical devices) + # Uses oc-wave-router and fsmgon module prefixes to avoid origin extraction issues + _WAVELENGTH_ROUTER_PREFIXES = [ + 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index={endpoint}]/state/{leaf}', + # Also try without the root prefix as fallback + 'wavelength-router/fsmgon:optical-bands/optical-band[index={endpoint}]/state/{leaf}', + ] # --------------------------------------------------------------# # Public helper # # --------------------------------------------------------------# @@ -88,9 +102,9 @@ class PathMapper: """ Return **a list** of path strings. - :param endpoint: Interface name, e.g. 'Ethernet0' + :param endpoint: Interface name (e.g. 'Ethernet0') or optical band index (e.g. 
'1') :param kpi: KPI enum - :param resource: Interface parameter + :param resource: Resource type: 'interface' or 'wavelength-router' """ try: kpi_enum = KPI(kpi) @@ -104,11 +118,16 @@ class PathMapper: paths: List[str] = [] for leaf in leaves: if resource == "interface": + # Use standard interface prefixes for prefix in cls._PREFIXES: paths.append(prefix.format(endpoint=endpoint, leaf=leaf)) + elif resource == "wavelength-router": + # Use wavelength router prefixes with module prefixes to avoid origin extraction + for prefix in cls._WAVELENGTH_ROUTER_PREFIXES: + paths.append(prefix.format(endpoint=endpoint, leaf=leaf)) else: raise ValueError(f"Unsupported resource: {resource}") - logger.debug("Built %d candidate path(s) for %s on %s", - len(paths), kpi_enum.name, endpoint) + logger.debug("Built %d candidate path(s) for %s on %s (resource=%s)", + len(paths), kpi_enum.name, endpoint, resource) return paths diff --git a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py index cbf0ebf424b6e6a9e4310d838250c9586b8459f3..c48ba505c7dd3f89f5abfe72aeb81a7f5add3bc5 100644 --- a/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py +++ b/src/telemetry/backend/service/collectors/gnmi_oc/SubscriptionNew.py @@ -13,16 +13,17 @@ # limitations under the License. 
-from google.protobuf.json_format import MessageToDict +import time from pygnmi.client import gNMIclient # type: ignore from queue import Queue -from typing import Callable, Tuple, Optional, List +from typing import Callable, Tuple, Optional, List, Any import grpc +import json import logging import threading -logger = logging.getLogger(__name__) -# logger.setLevel(logging.INFO) +LOGGER = logging.getLogger(__name__) +# LOGGER.setLevel(logging.INFO) class Subscription: @@ -39,10 +40,11 @@ class Subscription: gnmi_client: gNMIclient, path_list: List[str], metric_queue: Queue, - mode: str = "stream", - sample_interval_ns: int = 10_000_000_000, - heartbeat_interval_ns: Optional[int] = None, # ← NEW - encoding: str = "json_ietf", + mode: str = "stream", + sample_interval_ns: int = 10_000_000_000, + heartbeat_interval_ns: Optional[int] = None, + total_duration: Optional[float] = None, # in seconds + encoding: str = "json_ietf", on_update: Optional[Callable[[dict], None]] = None, ) -> None: @@ -54,15 +56,30 @@ class Subscription: self._thread = threading.Thread( target = self._run, args = ( - path_list, mode, - sample_interval_ns, heartbeat_interval_ns, encoding, on_update, + path_list, mode, sample_interval_ns, + heartbeat_interval_ns, encoding, on_update, ), name=f"gnmi-sub-{sub_id[:8]}", daemon=True, ) + # Start the subscription thread self._thread.start() - logger.info("Started subscription %s",sub_id) + + # Stop the subscription after the given duration + if total_duration and total_duration > 0: + def stop_after_duration(): + time.sleep(total_duration) + LOGGER.warning(f"Execution duration ({total_duration}s) completed for Subscription: {sub_id}") + self.stop() + duration_thread = threading.Thread( + target=stop_after_duration, daemon=True, name=f"stop_after_duration_{sub_id[:8]}" + ) + duration_thread.start() + else: + LOGGER.debug("Subscription %s has no total duration limit", sub_id) + + LOGGER.info("Started subscription %s",sub_id) # 
--------------------------------------------------------------# # Public helpers # # --------------------------------------------------------------# @@ -70,10 +87,96 @@ class Subscription: return self._queue.get(timeout=timeout) def stop(self) -> None: + """Gracefully stop the subscription thread.""" + if not self._thread.is_alive(): + LOGGER.debug("Subscription %s thread already stopped", self.sub_id) + return + + LOGGER.debug("Stopping subscription %s...", self.sub_id) self._stop_event.set() - self._thread.join(2) - logger.info("Stopped subscription %s", self.sub_id) + self._thread.join(timeout=3) + + if self._thread.is_alive(): + LOGGER.warning("Subscription %s thread did not stop within timeout", self.sub_id) + else: + LOGGER.info("Stopped subscription %s", self.sub_id) + # --------------------------------------------------------------# + # Internal loop # + # --------------------------------------------------------------# + def _parse_subscribe_response(self, stream_msg) -> dict: + """ + Parse gNMI SubscribeResponse protobuf message. + Mimics pygnmi's telemetryParser but simplified for our needs. + Properly decodes json_ietf_val by directly accessing protobuf bytes. 
+ """ + response = {} + + if stream_msg.HasField("update"): + response["update"] = { + "timestamp": stream_msg.update.timestamp if stream_msg.update.timestamp else 0, + "update": [] + } + + # Process updates + for update_msg in stream_msg.update.update: + update_container = { + "path": self._gnmi_path_to_string(update_msg.path) if update_msg.path else None + } + + # Decode the value - THIS IS THE KEY PART + if update_msg.HasField("val"): + if update_msg.val.HasField("json_ietf_val"): + # Access raw bytes and decode directly (like pygnmi does) + decoded_val = json.loads(update_msg.val.json_ietf_val) + # Try to convert numeric strings to float for proper formatting + if isinstance(decoded_val, str): + try: + decoded_val = float(decoded_val) + except (ValueError, TypeError): + pass # Keep as string if not numeric + update_container["val"] = decoded_val + elif update_msg.val.HasField("json_val"): + decoded_val = json.loads(update_msg.val.json_val) + # Try to convert numeric strings to float + if isinstance(decoded_val, str): + try: + decoded_val = float(decoded_val) + except (ValueError, TypeError): + pass + update_container["val"] = decoded_val + elif update_msg.val.HasField("string_val"): + update_container["val"] = update_msg.val.string_val + elif update_msg.val.HasField("int_val"): + update_container["val"] = update_msg.val.int_val + elif update_msg.val.HasField("uint_val"): + update_container["val"] = update_msg.val.uint_val + elif update_msg.val.HasField("bool_val"): + update_container["val"] = update_msg.val.bool_val + elif update_msg.val.HasField("float_val"): + update_container["val"] = update_msg.val.float_val + else: + update_container["val"] = None + + response["update"]["update"].append(update_container) + + elif stream_msg.HasField("sync_response"): + response["sync_response"] = stream_msg.sync_response + + return response + + def _gnmi_path_to_string(self, path_msg) -> str: + """Convert gNMI Path protobuf to string representation.""" + path_parts = [] + 
for elem in path_msg.elem: + part = elem.name + if elem.key: + # Add keys in sorted order for consistency + for key_name, key_val in sorted(elem.key.items()): + part += f"[{key_name}={key_val}]" + path_parts.append(part) + return "/".join(path_parts) + # --------------------------------------------------------------# # Internal loop # # --------------------------------------------------------------# @@ -88,7 +191,6 @@ class Subscription: ) -> None: # pragma: no cover """ Try each candidate path until the Subscribe RPC succeeds. - * Top level mode: STREAM / ONCE / POLL (here we always stream) * Per entry mode: SAMPLE / ON_CHANGE """ @@ -101,9 +203,10 @@ class Subscription: break entry: dict = {"path": path} + LOGGER.debug("Subscription %s preparing entry for path: %s", self.sub_id, path) if entry_mode == "sample": - entry["mode"] = "sample" + entry["mode"] = "sample" entry["sample_interval"] = sample_interval_ns elif entry_mode == "on_change": entry["mode"] = "on_change" @@ -117,41 +220,63 @@ class Subscription: "mode": top_mode, "encoding": encoding, } - logger.debug("Subscription %s to be requested: %s", self.sub_id, request) + LOGGER.debug("Subscription %s to be requested: %s", self.sub_id, request) try: - logger.debug("Sub %s attempting path %s", self.sub_id, path) + LOGGER.debug("Sub %s attempting path %s", self.sub_id, path) for stream in self.gnmi_client.subscribe(request): - msg_dict = MessageToDict(stream) - # logger.debug("Stream: %s", msg_dict) + # Check if stop was requested + if self._stop_event.is_set(): + LOGGER.debug("Sub %s stop requested, breaking stream loop", self.sub_id) + break + + LOGGER.info("Sub %s received stream message: %s", self.sub_id, stream) + + # DEBUG: Check if update has actual update messages + if stream.HasField("update"): + LOGGER.debug("Sub %s update field present, num updates: %d", + self.sub_id, len(stream.update.update)) + if len(stream.update.update) == 0: + LOGGER.warning("Sub %s received update notification with NO data 
values - device may have no data for path %s", + self.sub_id, path) + for i, upd in enumerate(stream.update.update): + LOGGER.debug("Sub %s update[%d] has val: %s, path elem count: %d", + self.sub_id, i, upd.HasField("val"), + len(upd.path.elem) if upd.path else 0) + + # Parse the protobuf message directly (like pygnmi does) + msg_dict = self._parse_subscribe_response(stream) + LOGGER.debug("Sub %s received message: %s", self.sub_id, msg_dict) # Process any update data - if msg_dict.get('update'): # 'update' in msg_dict: - logger.debug("Sub %s got update data", self.sub_id) + if msg_dict.get('update'): + LOGGER.debug("Sub %s got update data", self.sub_id) if on_update: on_update(msg_dict) else: self._queue.put(msg_dict) - # logger.debug("The update added in queue → %s", msg_dict) # Put a dummy update if syncResponse is received to prevent timeout - elif msg_dict.get('syncResponse'): # 'syncResponse' in msg_dict: - logger.debug("Sub %s received sync response", self.sub_id) + elif msg_dict.get('sync_response'): + LOGGER.debug("Sub %s received sync response", self.sub_id) # Optional: put a notification about the sync if not on_update: self._queue.put({"type": "sync_response", "value": True}) else: - logger.warning("Sub %s received unknown message: %s", self.sub_id, msg_dict) + LOGGER.warning("Sub %s received unknown message: %s", self.sub_id, msg_dict) except grpc.RpcError as err: - if err.code() == grpc.StatusCode.INVALID_ARGUMENT: - logger.warning("Path '%s' rejected (%s) -- trying next", - path, err.details()) + # Handle graceful shutdown (channel closed) + if err.code() == grpc.StatusCode.CANCELLED: + LOGGER.debug("Sub %s cancelled (channel closed) - graceful shutdown", self.sub_id) + break + elif err.code() == grpc.StatusCode.INVALID_ARGUMENT: + LOGGER.warning("Path '%s' rejected (%s) -- trying next", path, err.details()) continue - logger.exception("Subscription %s hit gRPC error: %s", - self.sub_id, err) - break + else: + LOGGER.exception("Subscription %s hit 
gRPC error: %s", self.sub_id, err) # Change with TFS Exception + break except Exception as exc: # pylint: disable=broad-except - logger.exception("Subscription %s failed: %s", self.sub_id, exc) + LOGGER.exception("Subscription %s failed: %s", self.sub_id, exc) # Change with TFS Exception break - logger.info("Subscription thread %s terminating", self.sub_id) + LOGGER.info("Subscription thread %s terminating", self.sub_id) diff --git a/src/telemetry/backend/tests/gnmi_oc/messages.py b/src/telemetry/backend/tests/gnmi_oc/messages.py index d68d2dde3d63ee9083b3a99795e9f89b493206b5..9cad7edef3b22a0e3e47d3f44fe00fdb3438773c 100644 --- a/src/telemetry/backend/tests/gnmi_oc/messages.py +++ b/src/telemetry/backend/tests/gnmi_oc/messages.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional import uuid from common.proto import kpi_manager_pb2 from common.proto.kpi_sample_types_pb2 import KpiSampleType @@ -20,48 +21,70 @@ from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI # Test device connection parameters devices = { 'device1': { - 'host': '10.1.1.86', - 'port': '6030', + 'host' : '10.1.1.86', + 'port' : '6030', 'username': 'ocnos', 'password': 'ocnos', 'insecure': True, + 'kpi' : KPI.KPISAMPLETYPE_PACKETS_RECEIVED, + 'resource': 'interface', + 'endpoint': 'Management0', }, 'device2': { - 'host': '10.1.1.87', - 'port': '6030', + 'host' : '10.1.1.87', + 'port' : '6030', 'username': 'ocnos', 'password': 'ocnos', 'insecure': True, + 'kpi' : KPI.KPISAMPLETYPE_PACKETS_RECEIVED, + 'resource': 'interface', + 'endpoint': 'Management0', }, 'device3': { - 'host': '172.20.20.101', - 'port': '6030', + 'host' : '172.20.20.101', + 'port' : '6030', 'username': 'admin', 'password': 'admin', 'insecure': True, + 'kpi' : KPI.KPISAMPLETYPE_PACKETS_RECEIVED, + 'resource': 'interface', + 'endpoint': 'Management0', + }, + 'mgon': { + 'host' : '172.17.254.24', + 'port' : '50061', + 
 'username' : 'admin', + 'password' : 'admin', + 'insecure' : True, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, + 'resource' : 'wavelength-router', #TODO: verify resource name from mg-on model + 'endpoint' : '1', + 'skip_verify': True, + }, + } -def creat_basic_sub_request_parameters( - resource: str = 'interface', - endpoint: str = 'Management0', # 'Ethernet1', - kpi: KPI = KPI.PACKETS_RECEIVED, # It should be KPI Id not name? Need to be replaced with KPI id. -) -> dict: +def creat_basic_sub_request_parameters() -> dict: - device = devices['device3'] - return { - 'target' : (device['host'], device['port']), - 'username' : device['username'], - 'password' : device['password'], - 'connect_timeout' : 15, - 'insecure' : device['insecure'], - 'mode' : 'on_change', # Subscription internal mode posibly: on_change, poll, sample - 'sample_interval_ns': '3s', - 'sample_interval' : '10s', - 'kpi' : kpi, - 'resource' : resource, - 'endpoint' : endpoint, - } + device = devices['mgon'] + if device: + kpi = device['kpi'] + resource = device['resource'] + endpoint = device['endpoint'] + return { + 'target' : (device['host'], device['port']), + 'username' : device['username'], + 'password' : device['password'], + 'connect_timeout' : 15, + 'insecure' : device['insecure'], + 'skip_verify' : device.get('skip_verify', True), + 'mode' : 'sample', # Subscription internal mode possibly: on_change, poll, sample + 'sample_interval_ns': '3s', + 'sample_interval' : '10s', + 'kpi' : kpi, + 'resource' : resource, + 'endpoint' : endpoint, + } + return {} def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): _create_kpi_request = kpi_manager_pb2.KpiDescriptor() diff --git a/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py b/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py index 6543c87436fdc6ab1e34136b0c93ca4df21d906a..d200fc0b74c7f9d1d16dfa9ce527d013beab419d 100644 --- 
a/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_integration_GnmiOCcollector.py @@ -164,7 +164,7 @@ def telemetry_backend_service(): # + Uncomment test_add_to_topology() in helper methods section to add a device. # - A KPI Descriptor must be added in KPI DB with correct device_id. # + Uncomment test_SetKpiDescriptor() in helper methods section to add a KPI Descriptor. - # - Kafka should be exposed externally 'kubectl port-forward -n kafka service/kafka-service 9094:9094'. + # - Kafka should be exposed externally 'kubectl port-forward -n kafka service/kafka-service 9092:9092'. def test_helper_get_collector_by_kpi_id(kpi_manager_client, context_client): LOGGER.info("Testing get_collector_by_kpi_id...") diff --git a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py index 127098d26b3818c0f7aa16b87419b505bcfaf53b..c08f196d166106ba6d30eb21e641bf294ad61e0c 100644 --- a/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py +++ b/src/telemetry/backend/tests/gnmi_oc/test_unit_GnmiOpenConfigCollector.py @@ -17,6 +17,7 @@ import time import pytest from telemetry.backend.service.collectors.gnmi_oc.GnmiOpenConfigCollector import GNMIOpenConfigCollector from .messages import creat_basic_sub_request_parameters +from ..Fixtures import kpi_manager_client, context_client logging.basicConfig( level=logging.DEBUG, @@ -25,6 +26,16 @@ logging.basicConfig( logger = logging.getLogger(__name__) +@pytest.fixture(autouse=True) +def log_all_methods(request): + ''' + This fixture logs messages before and after each test function runs, indicating the start and end of the test. + The autouse=True parameter ensures that this logging happens automatically for all tests in the module. 
+ ''' + logger.info(f" >>>>> Starting test: {request.node.name} ") + yield + logger.info(f" <<<<< Finished test: {request.node.name} ") + @pytest.fixture def sub_parameters(): """Fixture to provide subscription parameters.""" @@ -35,11 +46,12 @@ def sub_parameters(): def collector(sub_parameters): """Fixture to create and connect GNMI collector.""" collector = GNMIOpenConfigCollector( - username = sub_parameters['username'], - password = sub_parameters['password'], - insecure = sub_parameters['insecure'], - address = sub_parameters['target'][0], - port = sub_parameters['target'][1], + username = sub_parameters['username'], + password = sub_parameters['password'], + insecure = sub_parameters['insecure'], + address = sub_parameters['target'][0], + port = sub_parameters['target'][1], + skip_verify = sub_parameters.get('skip_verify', True), ) collector.Connect() yield collector @@ -58,7 +70,7 @@ def subscription_data(sub_parameters): "endpoint" : sub_parameters['endpoint'], "resource" : sub_parameters['resource'], }, - float(10.0), + float(60.0), float(5.0), ), ] @@ -117,17 +129,21 @@ def test_full_workflow(collector, subscription_data): response1 = collector.SubscribeState(subscription_data) logger.info("Subscription started: %s", subscription_data) assert all(response1) and isinstance(response1, list) + + _, _, duration_received, interval_received = subscription_data[0] # Get updates - logger.info("Requesting state updates for 5 seconds ...") + logger.info(f"Requesting state updates for {duration_received} seconds after every {interval_received} seconds ...") updates_received = [] - for samples in collector.GetState(duration=5.0, blocking=True): + for samples in collector.GetState(duration=duration_received, blocking=True): logger.info("Received state update: %s", samples) updates_received.append(samples) assert len(updates_received) > 0 # Wait for additional updates - logger.info("Waiting for updates for 5 seconds...") - time.sleep(5) + logger.info(f"Waiting for 
updates after every {interval_received} seconds...") + + # put a sleep to simulate waiting for more updates + time.sleep(15) # Unsubscribe response2 = collector.UnsubscribeState("x123") diff --git a/src/tests/automation/descriptors/automation.json b/src/tests/automation/descriptors/automation.json index 711dad5c72b653c6df91d0719f2ae11f794c0e97..bef7c4aa63c316631e0de3fc1ea6c79baf9898b5 100644 --- a/src/tests/automation/descriptors/automation.json +++ b/src/tests/automation/descriptors/automation.json @@ -1,19 +1,19 @@ { "target_service_id": { - "service_uuid": {"uuid": "66d498ad-5d94-5d90-8cb4-861e30689c64"}, + "service_uuid": {"uuid": "b2a60c5b-8c46-5707-a64a-9c6539d395f2"}, "context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}} }, "telemetry_service_id": { - "service_uuid": {"uuid": "db73d789-4abc-5514-88bb-e21f7e31d36a"}, + "service_uuid": {"uuid": "7397bdf2-eec8-57f4-9406-4f9e9f3dc50e"}, "context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}} }, "analyzer":{ "operation_mode": "ANALYZEROPERATIONMODE_STREAMING", "parameters": { - "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"avg\": [0, 2500]}]}" + "thresholds": "{\"task_type\": \"AggregationHandler\",\"task_parameter\": [ {\"last\": [-20, 5]}]}" }, "input_kpi_ids": [ - {"kpi_id": { "uuid": "58ed7d00-eca9-4d02-95d1-52ca9ad2532a"}} + {"kpi_id": { "uuid": "6e22f180-ba28-4641-b190-2287bf447777"}} ], "output_kpi_ids": [ {"kpi_id": { "uuid": "c45b09d8-c84a-45d8-b4c2-9fa9902d157d"}} @@ -24,7 +24,7 @@ "policy": { "serviceId": { "context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, - "service_uuid": {"uuid": "66d498ad-5d94-5d90-8cb4-861e30689c64"} + "service_uuid": {"uuid": "b2a60c5b-8c46-5707-a64a-9c6539d395f2"} }, "policyRuleBasic": { "actionList": [ diff --git a/src/tests/automation/run_test_automation.sh b/src/tests/automation/run_test_automation.sh old mode 100644 new mode 100755 diff --git 
a/src/tests/ofc26_flexscale/Fixtures.py b/src/tests/ofc26_flexscale/Fixtures.py new file mode 100644 index 0000000000000000000000000000000000000000..b958a39cddce5d2b093ab8d3e49bfeb418d9355b --- /dev/null +++ b/src/tests/ofc26_flexscale/Fixtures.py @@ -0,0 +1,76 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest +import logging +import os +from context.client.ContextClient import ContextClient +from device.client.DeviceClient import DeviceClient +from service.client.ServiceClient import ServiceClient +from kpi_manager.client.KpiManagerClient import KpiManagerClient +from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient + +# Import ENV variables +_ip_kpi_address = os.getenv('IP_KPI', None) +_ip_tele_address = os.getenv('IP_TELE', None) + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + + +@pytest.fixture(scope='session') +def context_client(): + _client = ContextClient(host="10.152.183.180") + _client.connect() + LOGGER.info('Yielding Connected ContextClient...') + yield _client + LOGGER.info('Closing ContextClient...') + _client.close() + +@pytest.fixture(scope='session') +def device_client(): + _client = DeviceClient(host="10.152.183.212") + _client.connect() + LOGGER.info('Yielding Connected DeviceClient...') + yield _client + LOGGER.info('Closing DeviceClient...') + _client.close() + +@pytest.fixture(scope='session') +def 
service_client(): + _client = ServiceClient(host="10.152.183.98") + _client.connect() + LOGGER.info('Yielding Connected ServiceClient...') + yield _client + LOGGER.info('Closing ServiceClient...') + _client.close() + +@pytest.fixture(scope='session') +def kpi_manager_client(): + _client = KpiManagerClient(host=_ip_kpi_address) + _client.connect() + LOGGER.info('Yielding Connected KpiManagerClient...') + yield _client + LOGGER.info('Closed KpiManagerClient...') + _client.close() + + +@pytest.fixture(scope='session') +def telemetry_frontend_client(): + _client = TelemetryFrontendClient(host=_ip_tele_address) + _client.connect() + LOGGER.info('Yielding Connected TelemetryFrontendClient...') + yield _client + LOGGER.info('Closed TelemetryFrontendClient...') + _client.close() diff --git a/src/tests/ofc26_flexscale/__init__.py b/src/tests/ofc26_flexscale/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ccc21c7db78aac26daa1f8c5ff8e1ffd3f35460 --- /dev/null +++ b/src/tests/ofc26_flexscale/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ diff --git a/src/tests/ofc26_flexscale/deploy_specs.sh b/src/tests/ofc26_flexscale/deploy_specs.sh new file mode 100755 index 0000000000000000000000000000000000000000..5c9282e9f084f6abf28580f449ad70c58cecbc16 --- /dev/null +++ b/src/tests/ofc26_flexscale/deploy_specs.sh @@ -0,0 +1,240 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. 
+export TFS_COMPONENTS="context device pathcomp opticalcontroller service nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate OSM Client +#export 
TFS_COMPONENTS="${TFS_COMPONENTS} osm_client" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +#export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + +# Uncomment to activate Pluggables Component +#export TFS_COMPONENTS="${TFS_COMPONENTS} pluggables" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. 
+export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="YES" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. 
This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- Time Series Storage - Prometheus / Grafana Mimir ----------------------- + +# Set Time Series Storage installation mode to 'single' (i.e., Prometheus only). +# This option is convenient for development and testing. See ./deploy/all.sh or +# ./deploy/monitoring.sh for additional details. +export TSDB_DEPLOY_MODE="single" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. 
+export GRAF_EXT_PORT_HTTP="3000" + + +# ----- Telemetry Config ------------------------------------------------------ + +# Define a Load Balancer IP for Telemetry Collector components +export LOAD_BALANCER_IP="192.168.5.250" # <-- Change this to match your network diff --git a/src/tests/ofc26_flexscale/mock_tfs_services.py b/src/tests/ofc26_flexscale/mock_tfs_services.py new file mode 100644 index 0000000000000000000000000000000000000000..6145e60f50abb07a65d42f22c8a843f120ea7853 --- /dev/null +++ b/src/tests/ofc26_flexscale/mock_tfs_services.py @@ -0,0 +1,86 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import os, pytest +import logging +from typing import Union + +from common.Constants import ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_service_port_grpc) +from common.tests.MockServicerImpl_Context import MockServicerImpl_Context +from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server + +from common.tools.service.GenericGrpcService import GenericGrpcService + +from kpi_manager.service.KpiManagerService import KpiManagerService +from kpi_manager.client.KpiManagerClient import KpiManagerClient + + + +LOGGER = logging.getLogger(__name__) + +LOCAL_HOST = '127.0.0.1' + +KPIMANAGER_SERVICE_PORT = get_service_port_grpc(ServiceNameEnum.KPIMANAGER) # type: ignore +os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) +os.environ[get_env_var_name(ServiceNameEnum.KPIMANAGER, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(KPIMANAGER_SERVICE_PORT) + + +# NOTE: ---> For local testing, without need of running TFS services. 
+ +class MockContextService(GenericGrpcService): + # Mock Service implementing Context to simplify unitary tests of Monitoring + + def __init__(self, bind_port: Union[str, int]) -> None: + super().__init__(bind_port, LOCAL_HOST, enable_health_servicer=False, cls_name='MockService') + + # pylint: disable=attribute-defined-outside-init + def install_servicers(self): + self.context_servicer = MockServicerImpl_Context() + add_ContextServiceServicer_to_server(self.context_servicer, self.server) + +# This fixture will be requested by test cases and last during testing session +@pytest.fixture(scope='session') +def kpi_manager_service(): + LOGGER.info('Initializing KpiManagerService...') + _service = KpiManagerService() + _service.start() + + # yield the server, when test finishes, execution will resume to stop it + LOGGER.info('Yielding KpiManagerService...') + yield _service + + LOGGER.info('Terminating KpiManagerService...') + _service.stop() + + LOGGER.info('Terminated KpiManagerService...') + +# This fixture will be requested by test cases and last during testing session. +# The client requires the server, so client fixture has the server as dependency. 
+# def monitoring_client(monitoring_service : MonitoringService): (Add for better understanding) +@pytest.fixture(scope='session') +def kpi_manager_client(kpi_manager_service : KpiManagerService): # pylint: disable=redefined-outer-name,unused-argument + LOGGER.info('Initializing KpiManagerClient...') + _client = KpiManagerClient() + + # yield the client; when the test finishes, execution resumes here to close it + LOGGER.info('Yielding KpiManagerClient...') + yield _client + + LOGGER.info('Closing KpiManagerClient...') + _client.close() + + LOGGER.info('Closed KpiManagerClient...') \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/run_ofc26_test.sh new file mode 100755 index 0000000000000000000000000000000000000000..2644320f514bac5c08a195954a6a1f36f6703100 --- /dev/null +++ b/src/tests/ofc26_flexscale/run_ofc26_test.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Make folder containing the script the root folder for its execution +cd $(dirname $0)/../../../ +echo "Running OFC26 test from folder: $(pwd)" +cd src/ +CRDB_SQL_ADDRESS=$(kubectl get service --namespace ${CRDB_NAMESPACE} cockroachdb-public -o 'jsonpath={.spec.clusterIP}') +export CRDB_URI="cockroachdb://tfs:tfs123@${CRDB_SQL_ADDRESS}:26257/tfs_kpi_mgmt?sslmode=require" + +# Added for Kafka exposure on the host +export KFK_SERVER_ADDRESS='127.0.0.1:9092' + +kubectl port-forward -n kafka service/kafka-service 9092:9092 > /dev/null 2>&1 & +KAFKA_PF_PID=$! + +# Function to clean up the Kafka port-forward on exit. NOTE(review): cleanup is +# defined but never registered (no 'trap cleanup EXIT') nor called, so the +# background port-forward outlives the script — confirm whether that is intended. +cleanup() { + # echo "Cleaning up Kafka port-forward (PID: ${KAFKA_PF_PID})..." + kill ${KAFKA_PF_PID} 2>/dev/null || true + wait ${KAFKA_PF_PID} 2>/dev/null || true +} + + +IP_KPI=$(kubectl get all --all-namespaces | grep service/kpi-managerservice | awk '{print $4}') +export IP_KPI +echo "KPI Manager Service IP: ${IP_KPI}" + +IP_TELE=$(kubectl get all --all-namespaces | grep service/telemetryservice | awk '{print $4}') +export IP_TELE +echo "Telemetry Frontend Service IP: ${IP_TELE}" + + +python -m pytest --log-level=INFO --log-cli-level=INFO --verbose \ + tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py diff --git a/src/tests/ofc26_flexscale/test_ofc26_messages.py new file mode 100644 index 0000000000000000000000000000000000000000..87549a3000161d1cea4474ae239d611772f62efd --- /dev/null +++ b/src/tests/ofc26_flexscale/test_ofc26_messages.py @@ -0,0 +1,84 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType +from src.telemetry.backend.service.collectors.gnmi_oc.KPI import KPI +from common.proto import telemetry_frontend_pb2 + + +# ---> KPI Manager messages creation for testing + +def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER + _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS + _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" + # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + _create_kpi_request.endpoint_id.endpoint_uuid.uuid = "2" + # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return _create_kpi_request + + +# ---> Telemetry messages creation for testing + +devices = { + 'mgon': { + 'host' : '172.17.254.24', + 'port' : '50061', + 'username' : 'admin', + 'password' : 'admin', + 'insecure' : True, + 'kpi' : KPI.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER, + #'resource': 'oc-wave-router:wavelength-router/fsmgon:optical-bands/optical-band[index=4]/state/optical-power-total-input/instant', + 'resource' : 'wavelength-router', #TODO: verify resource name form 
 mg-on model + 'endpoint' : '2', + 'skip_verify': True + }, +} + +def create_basic_sub_request_parameters() -> dict: + + device = devices['mgon'] + if device: + return { + 'host' : device['host'], + 'port' : device['port'], + 'username' : device['username'], + 'password' : device['password'], + 'connect_timeout' : 15, + 'insecure' : device['insecure'], + 'mode' : 'sample', # Subscription internal mode possibly: on_change, poll, sample + 'sample_interval' : 10, # This should be in seconds units + 'duration' : 300.0, # Duration in seconds for how long to receive samples + 'kpi' : device['kpi'], + 'resource' : device['resource'], + 'endpoint' : device['endpoint'], + } + return {} + + +def create_collector_request(): + _create_collector_request = telemetry_frontend_pb2.Collector() + _create_collector_request.collector_id.collector_id.uuid = "efef4d95-1cf1-43c4-9742-95c283dddddd" + _create_collector_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_collector_request.duration_s = 300 + _create_collector_request.interval_s = 10 + _create_collector_request.int_collector.context_id = "43813baf-195e-5da6-af20-b3d0922e71a7" + return _create_collector_request diff --git a/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py new file mode 100644 index 0000000000000000000000000000000000000000..49ac18b0c95991a3d1dce4ff9b08d0b53a19bcaa --- /dev/null +++ b/src/tests/ofc26_flexscale/test_ofc26_mgon_integration_V2.py @@ -0,0 +1,171 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +import time +from common.proto.kpi_manager_pb2 import KpiId +from common.proto.telemetry_frontend_pb2 import CollectorId +import time +import threading + +from common.proto import kpi_manager_pb2 +from common.proto.kpi_sample_types_pb2 import KpiSampleType + +from src.telemetry.backend.service.collector_api import DriverFactory +from tests.ofc26_flexscale.test_ofc26_messages import create_kpi_descriptor_request, create_collector_request +from src.tests.ofc26_flexscale.test_ofc26_messages import create_basic_sub_request_parameters + +from src.telemetry.backend.service.TelemetryBackendService import DriverInstanceCache, TelemetryBackendService + + +WITH_TFS = True #True/False +if WITH_TFS: + from .Fixtures import kpi_manager_client, telemetry_frontend_client +else: + from .mock_tfs_services import kpi_manager_client + +LOGGER = logging.getLogger(__name__) + + +def create_kpi_descriptor_request(descriptor_name: str = "Test_name"): + _create_kpi_request = kpi_manager_pb2.KpiDescriptor() + _create_kpi_request.kpi_id.kpi_id.uuid = "6e22f180-ba28-4641-b190-2287bf447777" + _create_kpi_request.kpi_description = descriptor_name + _create_kpi_request.kpi_sample_type = KpiSampleType.KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER + _create_kpi_request.device_id.device_uuid.uuid = "ddb3ef8e-ee65-5cf9-9d21-dac56a27f85b" # confirm for TFS + _create_kpi_request.service_id.service_uuid.uuid = "b2a60c5b-8c46-5707-a64a-9c6539d395f2" + # _create_kpi_request.slice_id.slice_uuid.uuid = 'SLC1' + # _create_kpi_request.endpoint_id.endpoint_uuid.uuid = str(uuid.uuid4()) 
+ # _create_kpi_request.connection_id.connection_uuid.uuid = 'CON1' + # _create_kpi_request.link_id.link_uuid.uuid = 'LNK1' + return _create_kpi_request + +# def create_collector_filter(): +# _create_collector_filter = telemetry_frontend_pb2.CollectorFilter() +# kpi_id_obj = KpiId() +# # kpi_id_obj.kpi_id.uuid = str(uuid.uuid4()) +# kpi_id_obj.kpi_id.uuid = "8c5ca114-cdc7-4081-b128-b667fd159832" +# _create_collector_filter.kpi_id.append(kpi_id_obj) +# return _create_collector_filter + + +def test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client): + + # 1. KPI Descriptor Creation + LOGGER.info(" >>> test_Complete_MGON_Integration: START <<< ") + kpi_descriptor_obj = create_kpi_descriptor_request() + _search_kpi_id = kpi_descriptor_obj.kpi_id + + try: + response = kpi_manager_client.GetKpiDescriptor(_search_kpi_id) + if isinstance(response, kpi_manager_pb2.KpiDescriptor): + LOGGER.info("KPI Descriptor already exists with ID: %s. Skipping creation.", _search_kpi_id.kpi_id.uuid) + except Exception as e: + LOGGER.info("No existing KPI Descriptor found with ID: %s. Proceeding to create it. Error: %s", _search_kpi_id.kpi_id.uuid, str(e)) + response = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_obj) + LOGGER.info("Response gRPC message object: {:}".format(response)) + assert isinstance(response, KpiId) + + # 2. 
Telemetry Collector Creation + + # _collector_request = create_collector_request() + # _search_collector_id = CollectorId() + # _search_collector_id = _collector_request.collector_id + # try: + # response_col = telemetry_frontend_client.StopCollector(_search_collector_id) + # LOGGER.info("Response gRPC message object: {:}".format(response_col)) + # if response is not None: + # response = telemetry_frontend_client.StartCollector(_collector_request) + # LOGGER.info("Response gRPC message object: {:}".format(response)) + # assert isinstance(response, CollectorId) + # except Exception as e: + # LOGGER.info("Error finding the collector with ID: %s. Proceeding to create it.", _search_collector_id.collector_id.uuid) + # response = telemetry_frontend_client.StartCollector(_collector_request) + # LOGGER.info("Response gRPC message object: {:}".format(response)) + # assert isinstance(response, CollectorId) + + # step 2: Telemetry Collector backup option + from telemetry.backend.service.collectors import COLLECTORS + from telemetry.backend.service.collector_api.DriverFactory import DriverFactory + from telemetry.backend.service.collector_api.DriverInstanceCache import DriverInstanceCache, preload_drivers + + driver_factory = DriverFactory(COLLECTORS) + driver_instance_cache = DriverInstanceCache(driver_factory) + _service = TelemetryBackendService(driver_instance_cache) + + _collector_request = create_collector_request() + _collector = create_basic_sub_request_parameters() + _coll_id = "mgon_collector_id" + LOGGER.info("Subscription for collector %s parameters: %s", _coll_id, _collector) + + _duration = _collector_request.duration_s + _interval = _collector_request.interval_s + + stop_event = threading.Event() + collector_thread = threading.Thread( + target=_service.GenericCollectorHandler, + args=( + _coll_id, _collector, "6e22f180-ba28-4641-b190-2287bf447777", _duration, _interval, + None, None, None, "43813baf-195e-5da6-af20-b3d0922e71a7", stop_event + ), + daemon=False + 
) + collector_thread.start() + + def stop_after_duration(completion_time, stop_event): + time.sleep(completion_time) + if not stop_event.is_set(): + LOGGER.warning(f"Execution duration ({completion_time}) completed for Collector: {_coll_id}") + stop_event.set() + + duration_thread = threading.Thread( + target=stop_after_duration, daemon=True, name=f"stop_after_duration_{_coll_id}", + args=(_duration, stop_event) + ) + duration_thread.start() + + LOGGER.info("Sleeping for %d seconds...", _duration) + time.sleep(_duration) + + LOGGER.info("Setting stop event for Collector: %s", _coll_id) + stop_event.set() + + # Wait for collector thread to complete + collector_thread.join(timeout=10) + if collector_thread.is_alive(): + LOGGER.warning("Collector thread did not terminate within timeout") + + LOGGER.info("Done sleeping.") + LOGGER.info(" >>> test_Complete_MGON_Integration: END <<< ") + + +# def test_get_state_updates(collector, subscription_data): +# """Test getting state updates.""" +# LOGGER.info("----- Testing State Updates -----") +# collector.SubscribeState(subscription_data) + +# LOGGER.info("Requesting state updates for 300 seconds ...") +# updates_received = [] +# for samples in collector.GetState(duration=300, blocking=True): +# LOGGER.info("Received state update: %s", samples) +# updates_received.append(samples) + +# assert len(updates_received) > 0 + + +if __name__ == "__main__": + test_Complete_MGON_Integration(kpi_manager_client, telemetry_frontend_client) + + diff --git a/src/tests/ofc26_flexscale/topology/CNIT/1.context.json b/src/tests/ofc26_flexscale/topology/CNIT/1.context.json new file mode 100644 index 0000000000000000000000000000000000000000..36b3c44fd61fdec9d208a82a11d5a16c3671d004 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/1.context.json @@ -0,0 +1,19 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": { + 
"context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + }, + "device_ids": [], + "link_ids": [] + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json b/src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json new file mode 100644 index 0000000000000000000000000000000000000000..d9f643f0918e70981e3adc6680f572603d8c16d9 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/2.0nodes-links_no_slots.json @@ -0,0 +1,1378 @@ +{ + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.51" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.51" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + 
"username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.51" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + 
"commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + 
"device_uuid": { + "uuid": "MGON1" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.21" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.22" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { 
+ "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.23" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.17.254.24" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "2022" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.1->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": 
"port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.1", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.2", + 
"link_id": { + "link_uuid": { + "uuid": "MGON1->T1.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-3-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": 
"port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-9-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-3-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-1-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + 
"length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-9-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-1-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": 
{"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.1-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.1->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.1", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + 
"uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json b/src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json new file mode 100644 index 0000000000000000000000000000000000000000..d3fa89a1f3c86cbb66a02200bfa25e977d36cefc --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/3.0-ob-s.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + 
"service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-S"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "S_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "6000"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..f1752d7e637ab83eb3e67bde0d3b47348f96739f --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/3.1-sc-s1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-25-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-25-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "1"}}, + {"custom": {"constraint_type": "bidirectionality", 
"constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..4a38e9022041b50a38aa743d79db7ed320bdfe69 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/3.2-sc-s2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-26-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-26-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "2900"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "1"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json b/src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json new file mode 100644 index 0000000000000000000000000000000000000000..0f02c6122dead037955fb7b0da5cdaeb9d1173dd --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/4.0.ob_l.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-L"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": 
{"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "L_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "1"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..e92ef29da561c2e30c6ab43c8ffe4a05e85f4824 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/4.1-sc-l1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-27-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-27-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1500"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json 
b/src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..bada34dbc5d771844de42e68171ef9d4d328d24b --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/4.2-sc-l2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-28-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-28-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1500"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json b/src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json new file mode 100644 index 0000000000000000000000000000000000000000..ab56e9daaf723f5580bd045131264534d77fa34e --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.0ob_c1.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": 
"type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json b/src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json new file mode 100644 index 0000000000000000000000000000000000000000..b9398c1066171b80fc49b8c8fdfd31155c2f3593 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.1ob_c2.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "3"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json b/src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..831a271d2eaf62c6ab3007055595c5beb6201d0c --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.2-sc-c-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { 
+ "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-c"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-29-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-29-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json b/src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json new file mode 100644 index 0000000000000000000000000000000000000000..47b551a94be5a99392434a79b14f4c84a8a15137 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/CNIT/5.3-oc-c-service.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-channel-C"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "1"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "800.0"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}} + ], + 
"service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/1.context.json b/src/tests/ofc26_flexscale/topology/HHI/1.context.json new file mode 100644 index 0000000000000000000000000000000000000000..36b3c44fd61fdec9d208a82a11d5a16c3671d004 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/1.context.json @@ -0,0 +1,19 @@ +{ + "contexts": [ + { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_ids": [], + "service_ids": [] + } + ], + "topologies": [ + { + "topology_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "topology_uuid": {"uuid": "admin"} + }, + "device_ids": [], + "link_ids": [] + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json new file mode 100644 index 0000000000000000000000000000000000000000..2fa10174412789348dcdba9949d9d6d85a2f552d --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_2TPs.json @@ -0,0 +1,826 @@ +{ + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + 
"device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44551" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + 
"device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44552" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44553" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": 
"_connect/port", + "resource_value": "44554" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.1->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.1", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": 
"MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-3-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-9-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-10-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-10-out", + "dst_port": "port-3-in", + "local_peer_port": "port-10-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-10-in", + 
"local_peer_port": "port-3-in", + "remote_peer_port": "port-10-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-2-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-2-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON3", 
+ "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-2-out", + "dst_port": "port-1-in", + "local_peer_port": "port-2-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.1-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.1->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.1", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json new file mode 100644 index 
0000000000000000000000000000000000000000..a4dea6ae3349d09a56966edbf0ecb64e99f56dc0 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/2.0nodes-links_no_slots_6TPs.json @@ -0,0 +1,1378 @@ +{ + "devices": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] 
+ } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ 
+ ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "device_type": "optical-transponder", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.16.31.93" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "3830" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "hussein", + "password": "flexscale", + "type": "optical-transponder", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } 
+ }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44551" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44552" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44553" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": 
"admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "device_type": "optical-roadm", + "device_drivers": [ + 11 + ], + "device_endpoints": [ + ], + "device_operational_status": 1, + "device_config": { + "config_rules": [ + { + "action": 1, + "custom": { + "resource_key": "_connect/address", + "resource_value": "172.24.36.52" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/port", + "resource_value": "44554" + } + }, + { + "action": 1, + "custom": { + "resource_key": "_connect/settings", + "resource_value": { + "username": "admin", + "password": "admin", + "type": "optical-roadm", + "force_running": false, + "hostkey_verify": false, + "look_for_keys": false, + "allow_agent": false, + "commit_per_rule": false, + "device_params": { + "name": "default" + }, + "manager_params": { + "timeout": 120 + }, + "endpoints": [ + ] + } + } + } + ] + } + } + ], + "optical_links": [ + { + "name": "T1.1-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.1->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + 
"uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T1.3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "T1.3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.1", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + "remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + 
"optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-T1.3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->T1.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T1.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-9-out", + "dst_port": "port-3-in", + "local_peer_port": "port-9-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-9-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-9-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-9-out", + "c_slots": {"1": 1}, 
+ "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON1-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON1->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-10-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-10-out", + "dst_port": "port-3-in", + "local_peer_port": "port-10-in", + "remote_peer_port": "port-3-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON1", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-3-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON1" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-3-out", + "dst_port": "port-10-in", + "local_peer_port": "port-3-in", + "remote_peer_port": "port-10-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON2" + } + }, + 
"link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON2" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-1-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "MGON3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-1-out", + "dst_port": "port-2-in", + "local_peer_port": "port-1-in", + "remote_peer_port": "port-2-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-MGON3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->MGON3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-2-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON3" + } + }, + "endpoint_uuid": { + "uuid": "port-1-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-2-out", + "dst_port": "port-1-in", + "local_peer_port": "port-2-in", + "remote_peer_port": "port-1-out", + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.1-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.1->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + }, + { + "device_id": { + "device_uuid": { + 
"uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "1", + "dst_port": "port-33-in", + "local_peer_port": "1", + "remote_peer_port": "port-33-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.2-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.2->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "2", + "dst_port": "port-34-in", + "local_peer_port": "2", + "remote_peer_port": "port-34-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "T2.3-MGON4", + "link_id": { + "link_uuid": { + "uuid": "T2.3->MGON4" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-in" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "3", + "dst_port": "port-35-in", + "local_peer_port": "3", + "remote_peer_port": "port-35-out", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.1", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.1" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-33-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.1" + } + }, + "endpoint_uuid": { + "uuid": "1" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-33-out", + "dst_port": "1", + "local_peer_port": "port-33-in", + 
"remote_peer_port": "1", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.2", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.2" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-34-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.2" + } + }, + "endpoint_uuid": { + "uuid": "2" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-34-out", + "dst_port": "2", + "local_peer_port": "port-34-in", + "remote_peer_port": "2", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + }, + { + "name": "MGON4-T2.3", + "link_id": { + "link_uuid": { + "uuid": "MGON4->T2.3" + } + }, + "link_endpoint_ids": [ + { + "device_id": { + "device_uuid": { + "uuid": "MGON4" + } + }, + "endpoint_uuid": { + "uuid": "port-35-out" + } + }, + { + "device_id": { + "device_uuid": { + "uuid": "T2.3" + } + }, + "endpoint_uuid": { + "uuid": "3" + } + } + ], + "optical_details": { + "length": 0, + "src_port": "port-35-out", + "dst_port": "3", + "local_peer_port": "port-35-in", + "remote_peer_port": "3", + "used": false, + "c_slots": {"1": 1}, + "l_slots": {"101": 1}, + "s_slots": {"501": 1} + } + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json b/src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json new file mode 100644 index 0000000000000000000000000000000000000000..d3fa89a1f3c86cbb66a02200bfa25e977d36cefc --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/3.0-ob-s.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-S"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": 
{"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "S_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "6000"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json b/src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..37969cfb80b5c19681ab6a079ac979786dd94943 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/3.1-sc-s1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-25-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-25-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "3"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json b/src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..e4d620d4762fa0b7342726eb41b1e91ea757b74d --- 
/dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/3.2-sc-s2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-s2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-26-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-26-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "2900"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "3"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} \ No newline at end of file diff --git a/src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json b/src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json new file mode 100644 index 0000000000000000000000000000000000000000..c63243f328aa21924d828f6ec5e9bf613899adf7 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/4.0.ob_l.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-L"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": 
{"constraint_type": "preferred_band", "constraint_value": "L_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3000"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "3"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json b/src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..3d24a34e30c9f40d113ca3b7486f6a9ef25349e5 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/4.1-sc-l1-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-l1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-27-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-27-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1500"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json b/src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..8210c786ea4b8fd2b9acd13a7bcc1c8bbe341a5c --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/4.2-sc-l2-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + 
"service_uuid": {"uuid": "super-channel-l2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-28-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-28-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1200"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "4"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json b/src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json new file mode 100644 index 0000000000000000000000000000000000000000..ab56e9daaf723f5580bd045131264534d77fa34e --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.0ob_c1.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C1"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-9-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-1-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git 
a/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json new file mode 100644 index 0000000000000000000000000000000000000000..d34f10d8590a3e7e4e35a2e3e63cb0e7397796a4 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2 - Copia.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "1900"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "1"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json new file mode 100644 index 0000000000000000000000000000000000000000..31229c1dcb3828517a2d648238cb0364037d8280 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.1ob_c2.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-band-C2"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-1-out"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": 
"port-9-in"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "preferred_band", "constraint_value": "C_BAND"}}, + {"custom": {"constraint_type": "optical-band-width[GHz]", "constraint_value": "3900"}}, + {"custom": {"constraint_type": "disjoint_optical_band_id", "constraint_value": "1"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json b/src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json new file mode 100644 index 0000000000000000000000000000000000000000..8411862c0b3c3f82b65b9b73bab7919dbe19ecf1 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.2-sc-c-alien.json @@ -0,0 +1,25 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "super-channel-c"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "MGON1"}}, "endpoint_uuid": {"uuid": "port-29-in"}}, + {"device_id": {"device_uuid": {"uuid": "MGON4"}}, "endpoint_uuid": {"uuid": "port-29-out"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "alien", "constraint_value": "1"}}, + {"custom": {"constraint_type": "alien_spectrum[GHz]", "constraint_value": "1959"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}} + + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json b/src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json new file mode 100644 index 
0000000000000000000000000000000000000000..169b854c20adc1e30d4c0fbbbf5a06cf26089790 --- /dev/null +++ b/src/tests/ofc26_flexscale/topology/HHI/5.3-oc-c-service.json @@ -0,0 +1,23 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "optical-channel-C"} + }, + "service_type": 6, + "service_status": {"service_status": 1}, + "service_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "T1.1"}}, "endpoint_uuid": {"uuid": "1"}}, + {"device_id": {"device_uuid": {"uuid": "T2.1"}}, "endpoint_uuid": {"uuid": "1"}} + ], + "service_constraints": [ + {"custom": {"constraint_type": "type", "constraint_value": "multi_granular"}}, + {"custom": {"constraint_type": "bandwidth[gbps]", "constraint_value": "800.0"}}, + {"custom": {"constraint_type": "bidirectionality", "constraint_value": "0"}}, + {"custom": {"constraint_type": "optical_band_id", "constraint_value": "2"}} + ], + "service_config": {"config_rules": []} + } + ] +} diff --git a/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java b/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java index 0c98ddbb4625c12c345226ec5654e475188619f9..3f4a7a2a6d81affb2837cc465268d9085a134466 100644 --- a/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java +++ b/src/ztp/target/generated-sources/grpc/kpi_sample_types/KpiSampleTypes.java @@ -71,6 +71,14 @@ public final class KpiSampleTypes { * KPISAMPLETYPE_OPTICAL_SECURITY_STATUS = 501; */ KPISAMPLETYPE_OPTICAL_SECURITY_STATUS(501), + /** + * KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT(502), + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER(503), /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -280,6 +288,16 @@ public final class KpiSampleTypes { */ public static final int KPISAMPLETYPE_OPTICAL_SECURITY_STATUS_VALUE = 501; + /** + * 
KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT = 502; + */ + public static final int KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT_VALUE = 502; + + /** + * KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER = 503; + */ + public static final int KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER_VALUE = 503; + /** * KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS = 601; */ @@ -503,6 +521,10 @@ public final class KpiSampleTypes { return KPISAMPLETYPE_ML_CONFIDENCE; case 501: return KPISAMPLETYPE_OPTICAL_SECURITY_STATUS; + case 502: + return KPISAMPLETYPE_OPTICAL_POWER_TOTAL_INPUT; + case 503: + return KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER; case 601: return KPISAMPLETYPE_L3_UNIQUE_ATTACK_CONNS; case 602: @@ -628,7 +650,7 @@ public final class KpiSampleTypes { private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { - java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\346\r\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + "\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022)\n$KPISA" + "MPLETYPE_L3_UNIQUE_ATTACK_CONNS\020\331\004\022*\n%KP" + "ISAMPLETYPE_L3_TOTAL_DROPPED_PACKTS\020\332\004\022&" + "\n!KPISAMPLETYPE_L3_UNIQUE_ATTACKERS\020\333\004\0220" + "\n+KPISAMPLETYPE_L3_UNIQUE_COMPROMISED_CL" + "IENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3_SECURITY_ST" + "ATUS_CRYPTO\020\335\004\022%\n KPISAMPLETYPE_SERVICE_" + "LATENCY_MS\020\275\005\0221\n,KPISAMPLETYPE_PACKETS_T" + 
"RANSMITTED_AGG_OUTPUT\020\315\010\022.\n)KPISAMPLETYP" + "E_PACKETS_RECEIVED_AGG_OUTPUT\020\316\010\022-\n(KPIS" + "AMPLETYPE_PACKETS_DROPPED_AGG_OUTPUT\020\317\010\022" + "/\n*KPISAMPLETYPE_BYTES_TRANSMITTED_AGG_O" + "UTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BYTES_RECEIVED" + "_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLETYPE_BYTES_DR" + "OPPED_AGG_OUTPUT\020\263\t\0220\n+KPISAMPLETYPE_SER" + "VICE_LATENCY_MS_AGG_OUTPUT\020\245\r\022\036\n\031KPISAMP" + "LETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPISAMPLETYPE_I" + "NT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYPE_INT_TS_EGR" + "\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP_LAT\020\324\017\022\"\n\035K" + "PISAMPLETYPE_INT_PORT_ID_ING\020\325\017\022\"\n\035KPISA" + "MPLETYPE_INT_PORT_ID_EGR\020\326\017\022\"\n\035KPISAMPLE" + "TYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032KPISAMPLETYPE" + "_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW01\020\265\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "4\020\270\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW05\020\271" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW06\020\272\020\022#" + "\n\036KPISAMPLETYPE_INT_HOP_LAT_SW07\020\273\020\022#\n\036K" + "PISAMPLETYPE_INT_HOP_LAT_SW08\020\274\020\022#\n\036KPIS" + "AMPLETYPE_INT_HOP_LAT_SW09\020\275\020\022#\n\036KPISAMP" + "LETYPE_INT_HOP_LAT_SW10\020\276\020\022#\n\036KPISAMPLET" + "YPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031KPISAMPLETYPE" + "_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLETYPE_INT_DRO" + "P_REASON\020\232\021b\006proto3" }; + java.lang.String[] descriptorData = { "\n\026kpi_sample_types.proto\022\020kpi_sample_typ" + "es*\302\016\n\rKpiSampleType\022\031\n\025KPISAMPLETYPE_UN" + "KNOWN\020\000\022%\n!KPISAMPLETYPE_PACKETS_TRANSMI" + "TTED\020e\022\"\n\036KPISAMPLETYPE_PACKETS_RECEIVED" + "\020f\022!\n\035KPISAMPLETYPE_PACKETS_DROPPED\020g\022$\n" + 
"\037KPISAMPLETYPE_BYTES_TRANSMITTED\020\311\001\022!\n\034K" + "PISAMPLETYPE_BYTES_RECEIVED\020\312\001\022 \n\033KPISAM" + "PLETYPE_BYTES_DROPPED\020\313\001\022+\n&KPISAMPLETYP" + "E_LINK_TOTAL_CAPACITY_GBPS\020\255\002\022*\n%KPISAMP" + "LETYPE_LINK_USED_CAPACITY_GBPS\020\256\002\022 \n\033KPI" + "SAMPLETYPE_ML_CONFIDENCE\020\221\003\022*\n%KPISAMPLE" + "TYPE_OPTICAL_SECURITY_STATUS\020\365\003\022,\n\'KPISA" + "MPLETYPE_OPTICAL_POWER_TOTAL_INPUT\020\366\003\022,\n" + "\'KPISAMPLETYPE_OPTICAL_TOTAL_INPUT_POWER" + "\020\367\003\022)\n$KPISAMPLETYPE_L3_UNIQUE_ATTACK_CO" + "NNS\020\331\004\022*\n%KPISAMPLETYPE_L3_TOTAL_DROPPED" + "_PACKTS\020\332\004\022&\n!KPISAMPLETYPE_L3_UNIQUE_AT" + "TACKERS\020\333\004\0220\n+KPISAMPLETYPE_L3_UNIQUE_CO" + "MPROMISED_CLIENTS\020\334\004\022,\n\'KPISAMPLETYPE_L3" + "_SECURITY_STATUS_CRYPTO\020\335\004\022%\n KPISAMPLET" + "YPE_SERVICE_LATENCY_MS\020\275\005\0221\n,KPISAMPLETY" + "PE_PACKETS_TRANSMITTED_AGG_OUTPUT\020\315\010\022.\n)" + "KPISAMPLETYPE_PACKETS_RECEIVED_AGG_OUTPU" + "T\020\316\010\022-\n(KPISAMPLETYPE_PACKETS_DROPPED_AG" + "G_OUTPUT\020\317\010\022/\n*KPISAMPLETYPE_BYTES_TRANS" + "MITTED_AGG_OUTPUT\020\261\t\022,\n\'KPISAMPLETYPE_BY" + "TES_RECEIVED_AGG_OUTPUT\020\262\t\022+\n&KPISAMPLET" + "YPE_BYTES_DROPPED_AGG_OUTPUT\020\263\t\0220\n+KPISA" + "MPLETYPE_SERVICE_LATENCY_MS_AGG_OUTPUT\020\245" + "\r\022\036\n\031KPISAMPLETYPE_INT_SEQ_NUM\020\321\017\022\035\n\030KPI" + "SAMPLETYPE_INT_TS_ING\020\322\017\022\035\n\030KPISAMPLETYP" + "E_INT_TS_EGR\020\323\017\022\036\n\031KPISAMPLETYPE_INT_HOP" + "_LAT\020\324\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_ING" + "\020\325\017\022\"\n\035KPISAMPLETYPE_INT_PORT_ID_EGR\020\326\017\022" + "\"\n\035KPISAMPLETYPE_INT_QUEUE_OCCUP\020\327\017\022\037\n\032K" + "PISAMPLETYPE_INT_QUEUE_ID\020\330\017\022#\n\036KPISAMPL" + "ETYPE_INT_HOP_LAT_SW01\020\265\020\022#\n\036KPISAMPLETY" + "PE_INT_HOP_LAT_SW02\020\266\020\022#\n\036KPISAMPLETYPE_" + 
"INT_HOP_LAT_SW03\020\267\020\022#\n\036KPISAMPLETYPE_INT" + "_HOP_LAT_SW04\020\270\020\022#\n\036KPISAMPLETYPE_INT_HO" + "P_LAT_SW05\020\271\020\022#\n\036KPISAMPLETYPE_INT_HOP_L" + "AT_SW06\020\272\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_" + "SW07\020\273\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW0" + "8\020\274\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW09\020\275" + "\020\022#\n\036KPISAMPLETYPE_INT_HOP_LAT_SW10\020\276\020\022#" + "\n\036KPISAMPLETYPE_INT_LAT_ON_TOTAL\020\310\020\022\036\n\031K" + "PISAMPLETYPE_INT_IS_DROP\020\231\021\022\"\n\035KPISAMPLE" + "TYPE_INT_DROP_REASON\020\232\021b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] {}); } // @@protoc_insertion_point(outer_class_scope) diff --git a/src/ztp/target/kubernetes/kubernetes.yml b/src/ztp/target/kubernetes/kubernetes.yml index 9d2196ea66f9b8814517c60113a664dc6c925886..da7022906678b532e80f901533f1b0bf7caa67d9 100644 --- a/src/ztp/target/kubernetes/kubernetes.yml +++ b/src/ztp/target/kubernetes/kubernetes.yml @@ -3,8 +3,8 @@ apiVersion: v1 kind: Service metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-24 - 06:37:09 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:41:09 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -16,10 +16,6 @@ metadata: name: ztp spec: ports: - - name: https - port: 443 - protocol: TCP - targetPort: 8443 - name: http port: 80 protocol: TCP @@ -28,6 +24,10 @@ spec: port: 9000 protocol: TCP targetPort: 9000 + - name: https + port: 443 + protocol: TCP + targetPort: 8443 selector: app.kubernetes.io/name: ztp app.kubernetes.io/version: 0.2.0 @@ -37,8 +37,8 @@ apiVersion: apps/v1 kind: Deployment metadata: annotations: - app.quarkus.io/commit-id: 
9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-24 - 06:37:09 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:41:09 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -57,8 +57,8 @@ spec: template: metadata: annotations: - app.quarkus.io/commit-id: 9e3e0ebd57f108eb7c0e1946bfc122dfd1a3180e - app.quarkus.io/build-timestamp: 2026-02-24 - 06:37:09 +0000 + app.quarkus.io/commit-id: 81b25ce03beb7463bc8bb5fcecf6a2cf4f64ddc4 + app.quarkus.io/build-timestamp: 2026-03-30 - 08:41:09 +0000 prometheus.io/scrape: "true" prometheus.io/path: /q/metrics prometheus.io/port: "8080" @@ -74,7 +74,7 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: ubuntu/ztp:0.2.0 + image: tfs/ztp:0.2.0 imagePullPolicy: Always livenessProbe: failureThreshold: 3 @@ -88,15 +88,15 @@ spec: timeoutSeconds: 10 name: ztp ports: - - containerPort: 8443 - name: https - protocol: TCP - containerPort: 8080 name: http protocol: TCP - containerPort: 9000 name: grpc protocol: TCP + - containerPort: 8443 + name: https + protocol: TCP readinessProbe: failureThreshold: 3 httpGet: