diff --git a/proto/automation.proto b/proto/automation.proto
index ccd17124956f51e15ae517d736a33fdfec226bbc..80ca0b7b9383ed2a248c04abdcb95e1bb24e29bf 100644
--- a/proto/automation.proto
+++ b/proto/automation.proto
@@ -17,6 +17,7 @@ package automation;

 import "context.proto";
 import "policy.proto";
+import "analytics_frontend.proto";

 // Automation service RPCs
 service AutomationService {
@@ -37,9 +38,16 @@ enum ZSMServiceStateEnum {
   ZSM_REMOVED = 5;     // ZSM loop is removed
 }

+enum ZSMTypeEnum {
+  ZSMTYPE_UNKNOWN = 0;
+  ZSMTYPE = 1;
+}
+
 message ZSMCreateRequest {
-  context.ServiceId serviceId = 1;
-  policy.PolicyRuleList policyList = 2;
+  context.ServiceId target_service_id = 1;
+  context.ServiceId telemetry_service_id = 2;
+  analytics_frontend.Analyzer analyzer = 3;
+  policy.PolicyRuleService policy = 4;
 }

 message ZSMCreateUpdate {
diff --git a/src/analytics/backend/service/AnalyzerHandlers.py b/src/analytics/backend/service/AnalyzerHandlers.py
index 75f7ed252d1dbb86a10750e7be892f2d8af8bf9c..dbc9c9577ae77f16bc4c410fa8ae4c81e1c132a0 100644
--- a/src/analytics/backend/service/AnalyzerHandlers.py
+++ b/src/analytics/backend/service/AnalyzerHandlers.py
@@ -15,6 +15,7 @@
 import logging
 from enum import Enum
 import pandas as pd
+from collections import defaultdict

 logger = logging.getLogger(__name__)

@@ -134,3 +135,45 @@ def aggregation_handler(
         return results
     else:
         return []
+
+def find(data, key, value):
+    # Return the first item whose `key` field equals `value`, or None if absent
+    return next((item for item in data if item[key] == value), None)
+
+
+def aggregation_handler_total_latency(
+    batch_type_name, key, batch, input_kpi_list, output_kpi_list, thresholds
+):
+    """
+    Process a batch of samples, average each input KPI, and map the sum of the
+    per-KPI averages (the total latency) to the first output KPI.
+    """
+
+    # Track sum and count per input KPI
+    sum_dict = defaultdict(int)
+    count_dict = defaultdict(int)
+
+    for item in batch:
+        kpi_id = item["kpi_id"]
+        if kpi_id in input_kpi_list:
+            sum_dict[kpi_id] += item["kpi_value"]
+            count_dict[kpi_id] += 1
+
+    # Compute the average per input KPI
+    avg_dict = {kpi_id: sum_dict[kpi_id] / count_dict[kpi_id] for kpi_id in sum_dict}
+
+    # Total latency is the sum of the per-KPI averages
+    total_kpi_lat = sum(avg_dict.values())
+
+    # TODO: raise/fall bounds are hard-coded; the `thresholds` argument is currently unused
+    result = {
+        "kpi_id": output_kpi_list[0],
+        "avg": total_kpi_lat,
+        "THRESHOLD_RAISE": total_kpi_lat > 2600,
+        "THRESHOLD_FALL": total_kpi_lat < 699
+    }
+
+    results = [result]
+    logger.debug("result: %s", result)
+
+    return results
diff --git a/src/automation/.gitlab-ci.yml b/src/automation/.gitlab-ci.yml
index 3343a5e917706d9d0c93f6853d1b8d458b489de5..5d18a73ba12ffb750d35a9eda70b53bb8db86878 100644
--- a/src/automation/.gitlab-ci.yml
+++ b/src/automation/.gitlab-ci.yml
@@ -66,8 +66,7 @@ unit_test automation:
     - sleep 5
     - docker ps -a
     - docker logs $IMAGE_NAME
-    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_emulated.py --junitxml=/opt/results/${IMAGE_NAME}_report_emulated.xml"
-    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_unitary_ietf_actn.py --junitxml=/opt/results/${IMAGE_NAME}_report_ietf_actn.xml"
+    - docker exec -i $IMAGE_NAME bash -c "coverage run --append -m pytest --log-level=INFO --verbose $IMAGE_NAME/tests/test_automation_handlers.py --junitxml=/opt/results/${IMAGE_NAME}_report_automation_handlers.xml"
    - docker exec -i $IMAGE_NAME bash -c "coverage report --include='${IMAGE_NAME}/*' --show-missing"
  coverage: '/TOTAL\s+\d+\s+\d+\s+(\d+%)/'
  after_script:
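For context, a minimal sketch of how the new `aggregation_handler_total_latency` consumes a batch. The KPI IDs and values below are made up, and the raise/fall bounds are the hard-coded ones from the handler above:

```python
# Hypothetical batch: two input KPIs sampled twice each (IDs are placeholders).
from analytics.backend.service.AnalyzerHandlers import aggregation_handler_total_latency

batch = [
    {"kpi_id": "kpi-lat-sw1", "kpi_value": 1200.0},
    {"kpi_id": "kpi-lat-sw1", "kpi_value": 1400.0},
    {"kpi_id": "kpi-lat-sw2", "kpi_value": 1500.0},
    {"kpi_id": "kpi-lat-sw2", "kpi_value": 1700.0},
]
results = aggregation_handler_total_latency(
    batch_type_name="latency", key="some-analyzer-key", batch=batch,
    input_kpi_list=["kpi-lat-sw1", "kpi-lat-sw2"],
    output_kpi_list=["kpi-total-lat"],
    thresholds={},  # currently ignored by the handler
)
# Per-KPI averages are 1300 and 1600, so the total is 2900 (> 2600 raises):
# [{'kpi_id': 'kpi-total-lat', 'avg': 2900.0, 'THRESHOLD_RAISE': True, 'THRESHOLD_FALL': False}]
print(results)
```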
diff --git a/src/automation/README.md b/src/automation/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..48b9c774225fa32d3d00c4bb59f8a9619b950c07
--- /dev/null
+++ b/src/automation/README.md
@@ -0,0 +1,22 @@
+# Plug In A ZSM Loop
+
+### Initialization
+The following requirements should be fulfilled before the execution of the Automation service.
+
+1. In ZSMFilterFieldEnum, add the new available service types.
+2. In the zsm_handlers folder, register the handler in ZSM_SERVICE_HANDLERS (in the __init__ file), giving the name of the handler file and its filter criteria.
+3. Add the handler file with the same name as the one registered in ZSM_SERVICE_HANDLERS.
+4. Implement some of the following methods inside your handler (a registration sketch follows this snippet):
+
+```
+  def zsmCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext):
+    LOGGER.info('Init zsmCreate method')
+  def zsmUpdate(self):
+    LOGGER.info('Init zsmUpdate method')
+  def zsmDelete(self):
+    LOGGER.info('Init zsmDelete method')
+  def zsmGetById(self):
+    LOGGER.info('Init zsmGetById method')
+  def zsmGetByService(self):
+    LOGGER.info('Init zsmGetByService method')
+```
\ No newline at end of file
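As a concrete illustration of steps 1-3, a hypothetical `MyZSMPlugin` would be registered roughly as below (the real registration for `P4INTZSMPlugin` appears in `zsm_handlers/__init__.py` further down). Note that any new service type must also be present in the corresponding `*_SERVICE_TYPE_VALUES` set in `ZSMFilterFields.py`, or the allowed-values check will reject the filter:

```python
# src/automation/service/zsm_handlers/__init__.py (sketch; MyZSMPlugin is hypothetical)
from common.proto.context_pb2 import ServiceTypeEnum
from ..zsm_handler_api.ZSMFilterFields import ZSMFilterFieldEnum
from .MyZSMPlugin import MyZSMPlugin  # file name must match the registered handler

ZSM_SERVICE_HANDLERS = [
    (MyZSMPlugin, [
        {
            # Selected only when both requested service types match these values
            ZSMFilterFieldEnum.TARGET_SERVICE_TYPE    : ServiceTypeEnum.SERVICETYPE_L2NM,
            ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT,
        }
    ]),
]
```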
diff --git a/src/automation/service/AutomationServiceServicerImpl.py b/src/automation/service/AutomationServiceServicerImpl.py
index 1f94f572ec51e6accd2ccfedc7c32fe4381d7f1c..67b05af317ef8538a32791e3d4229d902e60bdc8 100644
--- a/src/automation/service/AutomationServiceServicerImpl.py
+++ b/src/automation/service/AutomationServiceServicerImpl.py
@@ -15,22 +15,14 @@
 import grpc, json, logging
 from uuid import uuid4
 from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method
+from common.proto.automation_pb2_grpc import AutomationServiceServicer
 from common.method_wrappers.ServiceExceptions import InvalidArgumentException
-from common.proto.analytics_frontend_pb2 import Analyzer, AnalyzerId
 from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService, ZSMServiceID, ZSMServiceState, ZSMCreateUpdate
-from common.proto.automation_pb2_grpc import AutomationServiceServicer
 from common.proto.context_pb2 import Service, ServiceId
-from common.proto.kpi_manager_pb2 import KpiId, KpiDescriptor
-from common.proto.policy_pb2 import PolicyRuleService, PolicyRuleState
-from common.proto.policy_action_pb2 import PolicyRuleAction, PolicyRuleActionConfig
-from common.proto.policy_condition_pb2 import PolicyRuleCondition
-from common.proto.telemetry_frontend_pb2 import Collector, CollectorId
-
-from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient
-from automation.client.PolicyClient import PolicyClient
+from automation.service.zsm_handlers import ZSM_SERVICE_HANDLERS
+from automation.service.zsm_handler_api.ZSMFilterFields import ZSMFilterFieldEnum, TELEMETRY_SERVICE_TYPE_VALUES, TARGET_SERVICE_TYPE_VALUES, ZSM_FILTER_FIELD_ALLOWED_VALUES
+from common.proto.context_pb2 import ServiceTypeEnum, DeviceDriverEnum
 from context.client.ContextClient import ContextClient
-from kpi_manager.client.KpiManagerClient import KpiManagerClient
-from telemetry.frontend.client.TelemetryFrontendClient import TelemetryFrontendClient

 LOGGER = logging.getLogger(__name__)
 METRICS_POOL = MetricsPool('Automation', 'RPC')
@@ -41,165 +33,19 @@ class AutomationServiceServicerImpl(AutomationServiceServicer):

     @safe_and_metered_rpc_method(METRICS_POOL,LOGGER)
     def ZSMCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext) -> ZSMService:
-
-        # check that service does not exist
         context_client = ContextClient()
-        kpi_manager_client = KpiManagerClient()
-        policy_client = PolicyClient()
-        telemetry_frontend_client = TelemetryFrontendClient()
-        analytics_frontend_client = AnalyticsFrontendClient()
-
-        try:
-
-            # TODO: Remove static variables(get them from ZSMCreateRequest)
-            # TODO: Refactor policy component (remove unnecessary variables)
-
-            ####### GET Context #######################
-            LOGGER.info('Get the service from Context: ')
-            service: Service = context_client.GetService(request.serviceId)
-            LOGGER.info('Service ({:s}) :'.format(str(service)))
-            ###########################################
-
-            ####### SET Kpi Descriptor LAT ################
-            LOGGER.info('Set Kpi Descriptor LAT: ')
-
-            if(len(service.service_constraints) == 0):
-                raise InvalidArgumentException("service_constraints" , "empty", []);
-
-            if(len(service.service_constraints) > 1):
-                raise InvalidArgumentException("service_constraints" , ">1", []);
-
-            if(service.service_constraints[0].sla_latency is None ):
-                raise InvalidArgumentException("sla_latency", "empty", []);
-
-            ## Static Implementation Applied only in case of SLA Latency Constraint ##
-
-            # KPI Descriptor
-            kpi_descriptor_lat = KpiDescriptor()
-            kpi_descriptor_lat.kpi_sample_type = 701 #'KPISAMPLETYPE_SERVICE_LATENCY_MS' #static service.service_constraints[].sla_latency.e2e_latency_ms
-            kpi_descriptor_lat.service_id.service_uuid.uuid = request.serviceId.service_uuid.uuid
-            kpi_descriptor_lat.kpi_id.kpi_id.uuid = str(uuid4())
-
-            kpi_id_lat: KpiId = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_lat)
-            LOGGER.info('The kpi_id_lat({:s})'.format(str(kpi_id_lat)))
-            ###########################################
-
-            ####### SET Kpi Descriptor TX ################
-            LOGGER.info('Set Kpi Descriptor TX: ')
-
-            kpi_descriptor_tx = KpiDescriptor()
-            kpi_descriptor_tx.kpi_sample_type = 101 # static KPISAMPLETYPE_PACKETS_TRANSMITTED
-            kpi_descriptor_tx.service_id.service_uuid.uuid = request.serviceId.service_uuid.uuid
-            kpi_descriptor_tx.kpi_id.kpi_id.uuid = str(uuid4())
-
-            kpi_id_tx: KpiId = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_tx)
-            LOGGER.info('The kpi_id_tx({:s})'.format(str(kpi_id_tx)))
-            ###########################################
-
-            ####### SET Kpi Descriptor RX ################
-            LOGGER.info('Set Kpi Descriptor RX: ')
-
-            kpi_descriptor_rx = KpiDescriptor()
-            kpi_descriptor_rx.kpi_sample_type = 102 # static KPISAMPLETYPE_PACKETS_RECEIVED
-            kpi_descriptor_rx.service_id.service_uuid.uuid = request.serviceId.service_uuid.uuid
-            kpi_descriptor_rx.kpi_id.kpi_id.uuid = str(uuid4())
-
-            kpi_id_rx: KpiId = kpi_manager_client.SetKpiDescriptor(kpi_descriptor_rx)
-            LOGGER.info('kpi_id_rx({:s})'.format(str(kpi_id_rx)))
-            ###########################################
+        targetService = context_client.GetService(request.target_service_id)
+        telemetryService = context_client.GetService(request.telemetry_service_id)

+        handler_cls = self.get_service_handler_based_on_service_types(targetService.service_type, telemetryService.service_type, ZSM_SERVICE_HANDLERS)
+        if handler_cls:
+            handler_obj = handler_cls() # instantiate the matching handler
+            handler_obj.zsmCreate(request, context)
+        else:
+            LOGGER.warning("No matching handler found.")

-            ####### START Collector TX #################
-            collect_tx = Collector()
-            collect_tx.collector_id.collector_id.uuid = str(uuid4())
-            collect_tx.kpi_id.kpi_id.uuid = kpi_id_tx.kpi_id.uuid
-            collect_tx.duration_s = 20000 # static
-            collect_tx.interval_s = 1 # static
-            LOGGER.info('Start Collector TX'.format(str(collect_tx)))
-
-            collect_id_tx: CollectorId = 
telemetry_frontend_client.StartCollector(collect_tx) - LOGGER.info('collect_id_tx({:s})'.format(str(collect_id_tx))) - ############################################# - - ####### START Collector RX ################## - collect_rx = Collector() - collect_rx.collector_id.collector_id.uuid = str(uuid4()) - collect_rx.kpi_id.kpi_id.uuid = kpi_id_rx.kpi_id.uuid - collect_rx.duration_s = 20000 # static - collect_rx.interval_s = 1 # static - LOGGER.info('Start Collector RX'.format(str(collect_rx))) - - collect_id_rx: CollectorId = telemetry_frontend_client.StartCollector(collect_rx) - LOGGER.info('collect_id_tx({:s})'.format(str(collect_id_rx))) - ############################################### - - ####### START Analyzer LAT ################ - analyzer = Analyzer() - analyzer.analyzer_id.analyzer_id.uuid = str(uuid4()) - analyzer.algorithm_name = 'Test_Aggregate_and_Threshold' # static - analyzer.operation_mode = 2 - analyzer.input_kpi_ids.append(kpi_id_rx) - analyzer.input_kpi_ids.append(kpi_id_tx) - analyzer.output_kpi_ids.append(kpi_id_lat) - - thresholdStr = service.service_constraints[0].custom.constraint_type - - _threshold_dict = {thresholdStr: (0, int(service.service_constraints[0].custom.constraint_value))} - analyzer.parameters['thresholds'] = json.dumps(_threshold_dict) - analyzer.parameters['window_size'] = "60s" - analyzer.parameters['window_slider'] = "30s" - - analyzer_id_lat: AnalyzerId = analytics_frontend_client.StartAnalyzer(analyzer) - LOGGER.info('analyzer_id_lat({:s})'.format(str(analyzer_id_lat))) - ########################################################### - - ####### SET Policy LAT ################ - policy_lat = PolicyRuleService() - policy_lat.serviceId.service_uuid.uuid = request.serviceId.service_uuid.uuid - policy_lat.serviceId.context_id.context_uuid.uuid = request.serviceId.context_id.context_uuid.uuid - - # PolicyRuleBasic - policy_lat.policyRuleBasic.priority = 0 - policy_lat.policyRuleBasic.policyRuleId.uuid.uuid = str(uuid4()) - policy_lat.policyRuleBasic.booleanOperator = 2 - - # PolicyRuleAction - policyRuleActionConfig = PolicyRuleActionConfig() - policyRuleActionConfig.action_key = "" - policyRuleActionConfig.action_value = "" - - policyRuleAction = PolicyRuleAction() - policyRuleAction.action = 5 - policyRuleAction.action_config.append(policyRuleActionConfig) - policy_lat.policyRuleBasic.actionList.append(policyRuleAction) - - # for constraint in service.service_constraints: - - # PolicyRuleCondition - policyRuleCondition = PolicyRuleCondition() - policyRuleCondition.kpiId.kpi_id.uuid = kpi_id_lat.kpi_id.uuid - policyRuleCondition.numericalOperator = 5 - policyRuleCondition.kpiValue.floatVal = 300 - - policy_lat.policyRuleBasic.conditionList.append(policyRuleCondition) - - policy_rule_state: PolicyRuleState = policy_client.PolicyAddService(policy_lat) - LOGGER.info('policy_rule_state({:s})'.format(str(policy_rule_state))) - - except grpc.RpcError as e: - if e.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member - LOGGER.exception('Unable to get Service({:s})'.format(str(request))) - context_client.close() - kpi_manager_client.close() - policy_client.close() - telemetry_frontend_client.close() - return None - - context_client.close() - kpi_manager_client.close() - policy_client.close() - telemetry_frontend_client.close() return ZSMService() @safe_and_metered_rpc_method(METRICS_POOL,LOGGER) @@ -222,3 +68,26 @@ class AutomationServiceServicerImpl(AutomationServiceServicer): def ZSMGetByService(self, request : ServiceId, context : 
grpc.ServicerContext) -> ZSMService:
         LOGGER.info('NOT IMPLEMENTED ZSMGetByService')
         return ZSMService()
+
+    def get_service_handler_based_on_service_types(self, targetServiceType, telemetryServiceType, ZSM_SERVICE_HANDLERS):
+        # Return the first handler class whose filter criteria match both service types
+        for handler_cls, filters in ZSM_SERVICE_HANDLERS:
+            for filter_criteria in filters:
+                if self.check_if_requested_services_pass_filter_criteria(filter_criteria, targetServiceType, telemetryServiceType):
+                    return handler_cls
+        return None
+
+    def check_if_requested_services_pass_filter_criteria(self, filter_criteria, targetServiceType, telemetryServiceType):
+        # A filter passes only if every field value is allowed and matches the requested service types
+        flag = True
+        for filter_key, filter_value in filter_criteria.items():
+            if filter_value in ZSM_FILTER_FIELD_ALLOWED_VALUES[filter_key.value]:
+                if filter_key.value == ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value:
+                    if filter_value != targetServiceType:
+                        flag = False
+                elif filter_key.value == ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE.value:
+                    if filter_value != telemetryServiceType:
+                        flag = False
+            else:
+                flag = False
+        return flag
diff --git a/src/automation/service/zsm_handler_api/ZSMFilterFields.py b/src/automation/service/zsm_handler_api/ZSMFilterFields.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b00de5bc474f8ac4a6a711acf9f7200723ddaae
--- /dev/null
+++ b/src/automation/service/zsm_handler_api/ZSMFilterFields.py
@@ -0,0 +1,34 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from enum import Enum
+from common.proto.context_pb2 import ServiceTypeEnum
+
+class ZSMFilterFieldEnum(Enum):
+    TARGET_SERVICE_TYPE    = 'target_service_type'
+    TELEMETRY_SERVICE_TYPE = 'telemetry_service_type'
+
+TARGET_SERVICE_TYPE_VALUES = {
+    ServiceTypeEnum.SERVICETYPE_L2NM
+}
+
+TELEMETRY_SERVICE_TYPE_VALUES = {
+    ServiceTypeEnum.SERVICETYPE_INT
+}
+
+# Maps each filter field to its set of allowed values.
+# If a field has no restriction (free text), None is specified.
+ZSM_FILTER_FIELD_ALLOWED_VALUES = {
+    ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value    : TARGET_SERVICE_TYPE_VALUES,
+    ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE.value : TELEMETRY_SERVICE_TYPE_VALUES,
+}
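To add a new filter dimension (README step 1), one would extend the enum and the allowed-values map. `DEVICE_DRIVER` below is a hypothetical example, not part of this patch:

```python
# Sketch: a hypothetical extra filter field keyed on the device driver.
from enum import Enum
from common.proto.context_pb2 import DeviceDriverEnum, ServiceTypeEnum

class ZSMFilterFieldEnum(Enum):
    TARGET_SERVICE_TYPE    = 'target_service_type'
    TELEMETRY_SERVICE_TYPE = 'telemetry_service_type'
    DEVICE_DRIVER          = 'device_driver'  # new field (hypothetical)

DEVICE_DRIVER_VALUES = {
    DeviceDriverEnum.DEVICEDRIVER_P4,
}

ZSM_FILTER_FIELD_ALLOWED_VALUES = {
    ZSMFilterFieldEnum.TARGET_SERVICE_TYPE.value    : {ServiceTypeEnum.SERVICETYPE_L2NM},
    ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE.value : {ServiceTypeEnum.SERVICETYPE_INT},
    ZSMFilterFieldEnum.DEVICE_DRIVER.value          : DEVICE_DRIVER_VALUES,
}
```

Note that `check_if_requested_services_pass_filter_criteria` above only branches on the two service-type keys, so a new field would also need a matching branch there.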
diff --git a/src/automation/service/zsm_handler_api/ZSMHandler.py b/src/automation/service/zsm_handler_api/ZSMHandler.py
new file mode 100644
index 0000000000000000000000000000000000000000..165489793cc91bf07cf7c7a281fed979819120b1
--- /dev/null
+++ b/src/automation/service/zsm_handler_api/ZSMHandler.py
@@ -0,0 +1,37 @@
+# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.proto.automation_pb2 import ZSMCreateRequest
+
+LOGGER = logging.getLogger(__name__)
+
+class ZSMHandler:
+    def __init__(self):
+        LOGGER.info('Init ZSMHandler')
+
+    def zsmCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext):
+        LOGGER.info('zsmCreate method')
+
+    def zsmUpdate(self):
+        LOGGER.info('zsmUpdate method')
+
+    def zsmDelete(self):
+        LOGGER.info('zsmDelete method')
+
+    def zsmGetById(self):
+        LOGGER.info('zsmGetById method')
+
+    def zsmGetByService(self):
+        LOGGER.info('zsmGetByService method')
diff --git a/src/automation/tests/test_unitary_ietf_actn.py b/src/automation/service/zsm_handler_api/__init__.py
similarity index 76%
rename from src/automation/tests/test_unitary_ietf_actn.py
rename to src/automation/service/zsm_handler_api/__init__.py
index a175a7d62933d025c106a2ed7ccf50a3bb7e4b4a..c985cb35bce23982bca39b434a5157be339c45d6 100644
--- a/src/automation/tests/test_unitary_ietf_actn.py
+++ b/src/automation/service/zsm_handler_api/__init__.py
@@ -10,12 +10,4 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-
-LOGGER = logging.getLogger(__name__)
-
-def test_device_emulated_add_error_cases():
-    LOGGER.info("Start Tests")
-    assert True
+# limitations under the License.
\ No newline at end of file
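A new plugin only needs to subclass `ZSMHandler` and override the hooks it cares about. `MyZSMPlugin` here is a hypothetical skeleton mirroring the base class above:

```python
import grpc, logging
from common.proto.automation_pb2 import ZSMCreateRequest
from automation.service.zsm_handler_api.ZSMHandler import ZSMHandler

LOGGER = logging.getLogger(__name__)

class MyZSMPlugin(ZSMHandler):  # hypothetical name
    def __init__(self):
        LOGGER.info('Init MyZSMPlugin')

    def zsmCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext):
        # Wire up analytics/policy for the requested target/telemetry service pair here
        LOGGER.info('zsmCreate method')
```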
diff --git a/src/automation/service/zsm_handlers/P4INTZSMPlugin.py b/src/automation/service/zsm_handlers/P4INTZSMPlugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..d70ef513cdd52976903bab7872336ef4567616bd
--- /dev/null
+++ b/src/automation/service/zsm_handlers/P4INTZSMPlugin.py
@@ -0,0 +1,92 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import grpc, logging
+from common.proto.analytics_frontend_pb2 import AnalyzerId
+from common.proto.policy_pb2 import PolicyRuleState
+from common.proto.automation_pb2 import ZSMCreateRequest, ZSMService
+
+from analytics.frontend.client.AnalyticsFrontendClient import AnalyticsFrontendClient
+from automation.client.PolicyClient import PolicyClient
+from context.client.ContextClient import ContextClient
+from automation.service.zsm_handler_api.ZSMHandler import ZSMHandler
+
+LOGGER = logging.getLogger(__name__)
+
+class P4INTZSMPlugin(ZSMHandler):
+    def __init__(self):
+        LOGGER.info('Init P4INTZSMPlugin')
+
+    def zsmCreate(self, request : ZSMCreateRequest, context : grpc.ServicerContext):
+        # check that the referenced services exist
+        context_client = ContextClient()
+        policy_client = PolicyClient()
+        analytics_frontend_client = AnalyticsFrontendClient()
+
+        # Verify the input target service ID
+        try:
+            target_service = context_client.GetService(request.target_service_id)
+        except grpc.RpcError as ex:
+            if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+            LOGGER.exception('Unable to get target service({:s})'.format(str(request.target_service_id)))
+            context_client.close()
+            return None
+
+        # Verify the input telemetry service ID
+        try:
+            telemetry_service = context_client.GetService(request.telemetry_service_id)
+        except grpc.RpcError as ex:
+            if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+            LOGGER.exception('Unable to get telemetry service({:s})'.format(str(request.telemetry_service_id)))
+            context_client.close()
+            return None
+
+        # Start an analyzer
+        try:
+            analyzer_id_lat: AnalyzerId = analytics_frontend_client.StartAnalyzer(request.analyzer) # type: ignore
+            LOGGER.info('analyzer_id_lat({:s})'.format(str(analyzer_id_lat)))
+        except grpc.RpcError as ex:
+            if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+            LOGGER.exception('Unable to start analyzer({:s})'.format(str(request.analyzer)))
+            context_client.close()
+            analytics_frontend_client.close()
+            return None
+
+        # Create a policy
+        try:
+            policy_rule_state: PolicyRuleState = policy_client.PolicyAddService(request.policy) # type: ignore
+            LOGGER.info('policy_rule_state({:s})'.format(str(policy_rule_state)))
+        except grpc.RpcError as ex:
+            if ex.code() != grpc.StatusCode.NOT_FOUND: raise # pylint: disable=no-member
+            LOGGER.exception('Unable to create policy({:s})'.format(str(request.policy)))
+            context_client.close()
+            analytics_frontend_client.close()
+            policy_client.close()
+            return None
+
+        context_client.close()
+        analytics_frontend_client.close()
+        policy_client.close()
+        return ZSMService()
+
+    def zsmUpdate(self):
+        LOGGER.info('zsmUpdate method')
+
+    def zsmDelete(self):
+        LOGGER.info('zsmDelete method')
+
+    def zsmGetById(self):
+        LOGGER.info('zsmGetById method')
+
+    def zsmGetByService(self):
+        LOGGER.info('zsmGetByService method')
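With the reworked `ZSMCreateRequest`, a caller now supplies the analyzer and policy instead of the servicer hard-coding them. A rough client-side sketch, assuming the stub name follows standard protoc naming and using placeholder address and UUIDs:

```python
import grpc
from common.proto.automation_pb2 import ZSMCreateRequest
from common.proto.automation_pb2_grpc import AutomationServiceStub

channel = grpc.insecure_channel('automationservice:30200')  # address/port are placeholders
stub = AutomationServiceStub(channel)

request = ZSMCreateRequest()
request.target_service_id.service_uuid.uuid    = '<target-service-uuid>'     # placeholder
request.telemetry_service_id.service_uuid.uuid = '<telemetry-service-uuid>'  # placeholder
request.analyzer.algorithm_name = 'Test_Aggregate_and_Threshold'
# request.policy is a policy.PolicyRuleService and would be populated the same way

zsm_service = stub.ZSMCreate(request)
```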
diff --git a/src/automation/service/zsm_handlers/__init__.py b/src/automation/service/zsm_handlers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf26eaa10fe838db5fc72ba76c298975a7a6ce5a
--- /dev/null
+++ b/src/automation/service/zsm_handlers/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from common.proto.context_pb2 import ServiceTypeEnum
+from ..zsm_handler_api.ZSMFilterFields import ZSMFilterFieldEnum
+from .P4INTZSMPlugin import P4INTZSMPlugin
+
+ZSM_SERVICE_HANDLERS = [
+    (P4INTZSMPlugin, [
+        {
+            ZSMFilterFieldEnum.TARGET_SERVICE_TYPE    : ServiceTypeEnum.SERVICETYPE_L2NM,
+            ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT,
+        }
+    ])
+]
diff --git a/src/automation/tests/test_automation_handlers.py b/src/automation/tests/test_automation_handlers.py
new file mode 100644
index 0000000000000000000000000000000000000000..16fbcb255bbfb03829f48ed8901cb23c5161deaa
--- /dev/null
+++ b/src/automation/tests/test_automation_handlers.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from ..service.AutomationServiceServicerImpl import AutomationServiceServicerImpl
+from common.proto.context_pb2 import ServiceTypeEnum
+from ..service.zsm_handler_api.ZSMFilterFields import ZSMFilterFieldEnum
+from ..service.zsm_handlers import Poc1, Poc2
+
+LOGGER = logging.getLogger(__name__)
+
+ZSM_SERVICE_HANDLERS = [
+    (Poc1, [
+        {
+            ZSMFilterFieldEnum.TARGET_SERVICE_TYPE    : ServiceTypeEnum.SERVICETYPE_L2NM,
+            ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT,
+        }
+    ]),
+    (Poc2, [
+        {
+            ZSMFilterFieldEnum.TARGET_SERVICE_TYPE    : ServiceTypeEnum.SERVICETYPE_INT,
+            ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_L2NM,
+        }
+    ])
+]
+
+def test_get_service_handler_based_on_service_types():
+    automation = AutomationServiceServicerImpl()
+
+    # (INT, L2NM) matches the Poc2 registration, so a handler must be found
+    handler_cls = automation.get_service_handler_based_on_service_types(ServiceTypeEnum.SERVICETYPE_INT, ServiceTypeEnum.SERVICETYPE_L2NM, ZSM_SERVICE_HANDLERS)
+    assert handler_cls is not None
+
+
+def test_get_service_handler_based_on_service_types_error():
+    automation = AutomationServiceServicerImpl()
+
+    # (L2NM, L2NM) matches no registration, so the lookup must fail
+    handler_cls = automation.get_service_handler_based_on_service_types(ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_L2NM, ZSM_SERVICE_HANDLERS)
+    assert handler_cls is None
+
+def test_check_if_requested_services_pass_filter_criteria():
+    filter_criteria = {
+        ZSMFilterFieldEnum.TARGET_SERVICE_TYPE    : ServiceTypeEnum.SERVICETYPE_L2NM,
+        ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT,
+    }
+
+    automation = AutomationServiceServicerImpl()
+    flag = automation.check_if_requested_services_pass_filter_criteria(filter_criteria, ServiceTypeEnum.SERVICETYPE_L2NM, ServiceTypeEnum.SERVICETYPE_INT)
+    assert flag
+
+def test_check_if_requested_services_pass_filter_criteria_error():
+    filter_criteria = {
+        ZSMFilterFieldEnum.TARGET_SERVICE_TYPE    : ServiceTypeEnum.SERVICETYPE_L2NM,
+        ZSMFilterFieldEnum.TELEMETRY_SERVICE_TYPE : ServiceTypeEnum.SERVICETYPE_INT,
+    }
+
+    automation = AutomationServiceServicerImpl()
+    flag = automation.check_if_requested_services_pass_filter_criteria(filter_criteria, ServiceTypeEnum.SERVICETYPE_INT, ServiceTypeEnum.SERVICETYPE_L2NM)
+    assert not flag
diff --git a/src/service/service/ServiceServiceServicerImpl.py b/src/service/service/ServiceServiceServicerImpl.py
index bf923eed9880353b5fbde291dda1e66ba3f600e9..bb110d8c7aa1a51960cdb06c2435df4224da8032 100644
--- a/src/service/service/ServiceServiceServicerImpl.py
+++ b/src/service/service/ServiceServiceServicerImpl.py
@@ -21,7 +21,7 @@ from common.method_wrappers.ServiceExceptions import (
 )
 from common.proto.context_pb2 import (
     Connection, ConstraintActionEnum, Empty, Service, ServiceId, ServiceStatusEnum,
-    ServiceTypeEnum, TopologyId
+    ServiceTypeEnum, TopologyId, Constraint, Constraint_Exclusions
 )
 from common.proto.pathcomp_pb2 import PathCompRequest
 from common.proto.e2eorchestrator_pb2 import E2EOrchestratorRequest
@@ -105,6 +105,20 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             service_id = context_client.SetService(request)
         return service_id

+    def _detect_connection(self, conn, label):
+        LOGGER.info("------------------------------------------------------------")
+        LOGGER.info(f"--- Connection - {label}")
+        for hop in conn.path_hops_endpoint_ids:
+            dev = self._context_client.GetDevice(hop.device_id)
+            if (dev.name in ["sw1", "sw5"]):
+                LOGGER.info("-----------> Skip edge switches")
+                continue
+            LOGGER.info(f"    Device: {dev.name}")
+            return dev.name
+        LOGGER.info("------------------------------------------------------------")
+        return None
+
+
     @safe_and_metered_rpc_method(METRICS_POOL, LOGGER)
     def UpdateService(self, request : Service, context : grpc.ServicerContext) -> ServiceId:
         # Set service status to "SERVICESTATUS_PLANNED" to ensure rest of components are aware the service is
@@ -113,12 +127,15 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
         _service : Optional[Service] = get_service_by_id(
             context_client, request.service_id, rw_copy=False,
             include_config_rules=True, include_constraints=True, include_endpoint_ids=True)
+        LOGGER.info('_service={:s}'.format(str(_service)))

         # Identify service constraints
         num_disjoint_paths = None
         is_diverse = False
         gps_location_aware = False
+        LOGGER.info('request={:s}'.format(str(request)))
         for constraint in request.service_constraints:
+            LOGGER.info(f"--------------------------------------------> Inspecting constraint={constraint}")
             constraint_kind = constraint.WhichOneof('constraint')
             if constraint_kind == 'sla_availability':
                 num_disjoint_paths = constraint.sla_availability.num_disjoint_paths
@@ -127,6 +144,15 @@ class ServiceServiceServicerImpl(ServiceServiceServicer):
             elif constraint_kind == 'endpoint_location':
                 location = constraint.endpoint_location.location
                 if location.WhichOneof('location') == 'gps_position': gps_location_aware = True
+            elif constraint_kind == 'exclusions':
+                current_dev_id = constraint.exclusions.device_ids[0].device_uuid.uuid
+                LOGGER.info(f"--------------------------------------------> Previously excluded_device={current_dev_id}")
+                possible_dev_ids = ["sw2", "sw3", "sw4"]
+                possible_dev_ids.remove(current_dev_id)
+                
target_dev_to_exclude = possible_dev_ids + LOGGER.info(f"--------------------------------------------> Newly excluded_device={target_dev_to_exclude[0]}") + constraint.exclusions.device_ids[0].device_uuid.uuid = target_dev_to_exclude[0] + LOGGER.info(f"--------------------------------------------> New exclusion_constraint={constraint}") else: continue @@ -324,13 +350,17 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): tasks_scheduler.compose_from_pathcompreply( optical_reply, is_delete=False) else: + LOGGER.info('len(service_with_uuids.service_endpoint_ids)={:s}'.format(str(service_with_uuids.service_endpoint_ids))) + LOGGER.info('len(service_with_uuids.service_endpoint_ids)={:s}'.format(str(num_expected_endpoints))) if len(service_with_uuids.service_endpoint_ids) >= num_expected_endpoints: pathcomp_request = PathCompRequest() pathcomp_request.services.append(service_with_uuids) # pylint: disable=no-member - + LOGGER.info('pathcomp_request {:s}'.format(str(pathcomp_request))) if num_disjoint_paths is None or num_disjoint_paths in {0, 1} : + LOGGER.info('No other path is available') pathcomp_request.shortest_path.Clear() # pylint: disable=no-member else: + LOGGER.info('Number of path available {:s}'.format(str(num_disjoint_paths))) pathcomp_request.k_disjoint_path.num_disjoint = num_disjoint_paths # pylint: disable=no-member pathcomp = PathCompClient() @@ -340,6 +370,7 @@ class ServiceServiceServicerImpl(ServiceServiceServicer): # Feed TaskScheduler with this path computation reply. TaskScheduler identifies inter-dependencies among # the services and connections retrieved and produces a schedule of tasks (an ordered list of tasks to be # executed) to implement the requested create/update operation. + LOGGER.info(f"---------------------------> Calling compose_from_pathcompreply") tasks_scheduler.compose_from_pathcompreply(pathcomp_reply, is_delete=False) tasks_scheduler.execute_all() diff --git a/src/service/service/task_scheduler/TaskScheduler.py b/src/service/service/task_scheduler/TaskScheduler.py index b68b4839129e81ec94889cd1b651a1d901a3c117..716aaa37bd123cf92532bbd4ea6f2dd47b9493ec 100644 --- a/src/service/service/task_scheduler/TaskScheduler.py +++ b/src/service/service/task_scheduler/TaskScheduler.py @@ -15,8 +15,9 @@ import graphlib, logging, queue, time from typing import TYPE_CHECKING, Dict, Tuple from common.proto.context_pb2 import ( - Connection, ConnectionId, Service, ServiceId, ServiceStatusEnum, ConnectionList + Uuid, Empty, DeviceId, Connection, ConnectionId, EndPointId, EndPointIdList, Service, ServiceId, ServiceStatusEnum, ConnectionList, ServiceTypeEnum ) +from common.tools.object_factory.Device import json_device_id from common.proto.pathcomp_pb2 import PathCompReply from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient @@ -32,6 +33,8 @@ from .TaskExecutor import CacheableObjectType, TaskExecutor from .tasks.Task_OpticalServiceConfigDelete import Task_OpticalServiceConfigDelete from service.service.tools.OpticalTools import delete_lightpath +from uuid import uuid4 + if TYPE_CHECKING: from service.service.service_handler_api.ServiceHandlerFactory import ServiceHandlerFactory @@ -169,23 +172,313 @@ class TasksScheduler: self._dag.add(service_config_key, service_pending_removal_key) return service_config_key + def detect_middle_switch(self, conn): + hop = conn.path_hops_endpoint_ids[1] + dev = self._context_client.GetDevice(hop.device_id) + return dev.name + + def report_connection(self, conn, 
label): + LOGGER.info(f"------------------------------------------------------------") + LOGGER.info(f"--- Connection - {label}") + for hop in conn.path_hops_endpoint_ids: + dev_uuid = hop.device_id.device_uuid.uuid + dev = self._context_client.GetDevice(hop.device_id) + ep = hop.endpoint_uuid + LOGGER.info(f" Device: {dev.name}") + LOGGER.info(f"Endpoint: {ep}") + LOGGER.info("") + LOGGER.info(f"------------------------------------------------------------") + + def shorten_connection(self, conn): + new_conn = Connection() + new_conn.connection_id.CopyFrom(conn.connection_id) + new_conn.service_id.CopyFrom(conn.service_id) + for i, hop in enumerate(conn.path_hops_endpoint_ids): + if i == 0 or i == 5: + LOGGER.info(f"Skipping first or last hop") + else: + new_conn.path_hops_endpoint_ids.add().CopyFrom(hop) + # self.report_connection(new_conn, label="Shorter") + return new_conn + + def replaced_hops(self, conn): + import random + + # Find who is now in the middle + sw_to_exclude = self.detect_middle_switch(conn) + + new_conn = Connection() + new_conn.connection_id.CopyFrom(conn.connection_id) + new_conn.service_id.CopyFrom(conn.service_id) + + LOGGER.info(f"### To exclude {sw_to_exclude}") + possible_switches = ["sw2", "sw3", "sw4"] + possible_switches.remove(sw_to_exclude) + LOGGER.info(f"### Filtered switches found: {possible_switches}") + selected_sw = random.choice(possible_switches) + LOGGER.info(f"### Selected switch: {selected_sw}") + new_dev_id = DeviceId(**json_device_id(selected_sw)) + new_dev = self._context_client.GetDevice(new_dev_id) + LOGGER.info(f"### {selected_sw} ID: {new_dev.device_id}") + + repl_num = 1 + for hop in conn.path_hops_endpoint_ids: + cur_dev = self._context_client.GetDevice(hop.device_id) + cur_ep_uuid = hop.endpoint_uuid + LOGGER.info(f"################### cur_dev.name {cur_dev.name}") + + if cur_dev.name == sw_to_exclude: + LOGGER.info(f"################### Replace {sw_to_exclude} with {selected_sw}") + new_ep = EndPointId() + new_ep.topology_id.CopyFrom(hop.topology_id) + new_ep.device_id.CopyFrom(new_dev_id) + + new_ep_uuid = None + for ep in new_dev.device_endpoints: + if ep.name == "3": + LOGGER.info(f"##### Skipping INT endpoint {ep.name}") + continue + if ep.name == str(repl_num): + new_ep_uuid = ep.endpoint_id.endpoint_uuid + new_ep.endpoint_uuid.CopyFrom(new_ep_uuid) + + # Add path hops to new connection + new_conn.path_hops_endpoint_ids.add().CopyFrom(new_ep) + repl_num += 1 + + return new_conn, selected_sw, new_dev.device_id.device_uuid.uuid + + def modify_edge_hops(self, conn, replaced_dev_name, replaced_dev_uuid): + LOGGER.info(f"------------------> Looking for device {replaced_dev_name} with ID {replaced_dev_uuid}") + link_list = self._context_client.ListLinks(Empty()) + if not link_list: + LOGGER.info(f"------------------> LINKS CANNOT BE FETCHED") + return None + + LOGGER.info(f"------------------> Total links number: {len(link_list.links)}") + + dp_links = [] + for i, link in enumerate(link_list.links): + if "tfs" not in link.name: + dp_links.append(link) + + LOGGER.info(f"------------------> Dataplane links number: {len(dp_links)}") + + relevant_links = [] + for link in dp_links: + if not replaced_dev_name in link.name: + continue + relevant_links.append(link) + assert len(relevant_links) == 2, "Cannot have more than two relevant links" + + LOGGER.info(f"------------------> Relevant links number: {len(relevant_links)}") + LOGGER.info("") + + result = [] + paired_ep_uuid = None + paired_dev_id = None + for link in relevant_links: + 
LOGGER.info(f"------------------> Link name {link.name}") + for endpoint_id in link.link_endpoint_ids: + dev_id = endpoint_id.device_id + dev_uuid = endpoint_id.device_id.device_uuid.uuid + dev = self._context_client.GetDevice(dev_id) + # LOGGER.info(f"------------------> dev_id {dev_uuid} --- replaced_dev_uuid {replaced_dev_uuid}") + # LOGGER.info(f"------------------> dev.name {dev.name}") + # LOGGER.info(f"------------------> Replaced device {replaced_dev_name} with ID {replaced_dev_uuid}") + if dev_uuid != replaced_dev_uuid and dev.name in ["sw1", "sw5"]: + paired_dev = dev + paired_dev_id = dev_id + paired_ep_uuid = endpoint_id.endpoint_uuid + LOGGER.info(f"------------------> FOUND switch {paired_dev.name} with ID {paired_dev_id} and endpoint UUID {paired_ep_uuid}") + result.append((paired_dev_id, paired_ep_uuid)) + + return result + + def turn_edge_hops_to_connections(self, conn, hop_tuples): + assert len(hop_tuples) == 2, "Cannot have more than two edge hops" + + new_conn = Connection() + new_conn.connection_id.CopyFrom(conn.connection_id) + new_conn.service_id.CopyFrom(conn.service_id) + + ep_map = {} + for dev_id, dev_ep in hop_tuples: + # dev_id = DeviceId(**json_device_id(dev_name)) + # LOGGER.info(f"################### Edge device ID {dev_id}") + dev = self._context_client.GetDevice(dev_id) + assert dev.name in ["sw1", "sw5"] + LOGGER.info(f"################### Edge dev name {dev.name} - endpoint UUID {dev_ep}") + # Turn Endpoint UUID into EndpointId + new_ep = EndPointId() + new_ep.topology_id.CopyFrom(conn.path_hops_endpoint_ids[0].topology_id) + new_ep.device_id.CopyFrom(dev_id) + new_ep.endpoint_uuid.CopyFrom(dev_ep) + + ep_map[dev.name] = new_ep + + # Add it to the connection list alphabetically + LOGGER.info(f"################### Ordered map") + for k, v in dict(sorted(ep_map.items())).items(): + LOGGER.info(f"################### {k}: {v}") + new_conn.path_hops_endpoint_ids.add().CopyFrom(v) + return new_conn + + def new_l2_connection(self, conn): + new_conn = Connection() + new_con_id = ConnectionId() + new_uuid = Uuid() + new_uuid.uuid = str(uuid4()) + new_con_id.connection_uuid.CopyFrom(new_uuid) + new_conn.connection_id.CopyFrom(new_con_id) + new_conn.service_id.CopyFrom(conn.service_id) + + LOGGER.info("=================================================================================================") + self.report_connection(conn, label=">>>>>>>> Before <<<<<<<<") + LOGGER.info("=================================================================================================") + + LOGGER.info("\n") + + LOGGER.info("=================================================================================================") + self.report_connection(new_conn, label=">>>>>>>> Initial new <<<<<<<<") + LOGGER.info("=================================================================================================") + + LOGGER.info("\n") + + # Hop-1: client-sw1 + first_hop = conn.path_hops_endpoint_ids[0] + + # Hop 3 + Hop 4: middle-switch (sw2/3/4) to be replaced + LOGGER.info("=================================================================================================") + temp_conn = self.shorten_connection(conn) + self.report_connection(temp_conn, label=">>>>>>>> Initial Shorter <<<<<<<<") + middle_con, replaced_dev_name, replaced_dev_id = self.replaced_hops(temp_conn) + self.report_connection(middle_con, label=">>>>>>>> New middle only <<<<<<<<") + LOGGER.info("=================================================================================================") + + 
LOGGER.info("\n") + + # # Hop-6: sw5 to server + last_hop = conn.path_hops_endpoint_ids[5] + + # Hop-2+5: sw1 and sw5 to new middle switch + LOGGER.info("=================================================================================================") + temp_conn_2 = self.modify_edge_hops(middle_con, replaced_dev_name, replaced_dev_id) + assert temp_conn_2, "Cannot proceed without connection tuples" + replaced_conn = self.turn_edge_hops_to_connections(conn, temp_conn_2) + self.report_connection(replaced_conn, label=">>>>>>>> Replaced edge-infra hops <<<<<<<<") + LOGGER.info("=================================================================================================") + + # LOGGER.info("\n") + + # # Add them in order + # LOGGER.info("=================================================================================================") + assert len(new_conn.path_hops_endpoint_ids) == 0, "[1st hop] New connection must have no endpoints initially" + new_conn.path_hops_endpoint_ids.add().CopyFrom(first_hop) + self.report_connection(new_conn, label=">>>>>>>> NEW step 1 <<<<<<<<") + assert len(new_conn.path_hops_endpoint_ids) == 1, "[1st hop] client-sw1 is not properly established" + new_conn.path_hops_endpoint_ids.add().CopyFrom(replaced_conn.path_hops_endpoint_ids[0]) + assert len(new_conn.path_hops_endpoint_ids) == 2, "[2nd hop] sw1-{} is not properly established".format(replaced_dev_name) + self.report_connection(new_conn, label=">>>>>>>> NEW step 2 <<<<<<<<") + for c in middle_con.path_hops_endpoint_ids: + LOGGER.info(f"Replaced {c}") + new_conn.path_hops_endpoint_ids.add().CopyFrom(c) + assert len(new_conn.path_hops_endpoint_ids) == 4, "[3rd hop] {} is not properly established".format(replaced_dev_name) + self.report_connection(new_conn, label=">>>>>>>> NEW step 3 <<<<<<<<") + new_conn.path_hops_endpoint_ids.add().CopyFrom(replaced_conn.path_hops_endpoint_ids[1]) + assert len(new_conn.path_hops_endpoint_ids) == 5, "[4th hop] {}-sw5 is not properly established".format(replaced_dev_name) + self.report_connection(new_conn, label=">>>>>>>> NEW step 4 <<<<<<<<") + new_conn.path_hops_endpoint_ids.add().CopyFrom(last_hop) + assert len(new_conn.path_hops_endpoint_ids) == 6, "[5th hop] sw5-server is not properly established" + self.report_connection(new_conn, label=">>>>>>>> NEW step 5 <<<<<<<<") + LOGGER.info("=================================================================================================") + + return new_conn + + def new_int_connection(self, service_int_id): + new_conn = Connection() + new_con_id = ConnectionId() + new_uuid = Uuid() + new_uuid.uuid = str(uuid4()) + new_con_id.connection_uuid.CopyFrom(new_uuid) + new_conn.connection_id.CopyFrom(new_con_id) + new_conn.service_id.CopyFrom(service_int_id) + + for i in range(1, 6): + new_dev_id = DeviceId(**json_device_id("sw"+str(i))) + new_dev = self._context_client.GetDevice(new_dev_id) + LOGGER.info(f"++++++++++++++++++++++++++ [INT CONN] Dev {new_dev.name}") + + topology_id = new_dev.device_endpoints[0].endpoint_id.topology_id + + new_ep_id = EndPointId() + new_ep_id.topology_id.CopyFrom(topology_id) + new_ep_id.device_id.CopyFrom(new_dev_id) + + for ep in new_dev.device_endpoints: + if ep.endpoint_type == "port-int": + LOGGER.info(f"++++++++++++++++++++++++++ [INT CONN] Dev {new_dev.name} - INT endpoint {ep.name} with ID {ep.endpoint_id.endpoint_uuid}") + new_ep_id.endpoint_uuid.CopyFrom(ep.endpoint_id.endpoint_uuid) + + new_conn.path_hops_endpoint_ids.add().CopyFrom(new_ep_id) + + return new_conn + def 
compose_from_pathcompreply(self, pathcomp_reply : PathCompReply, is_delete : bool = False) -> None:
         t0 = time.time()
         include_service = self._service_remove if is_delete else self._service_create
         include_connection = self._connection_deconfigure if is_delete else self._connection_configure

+        is_l2  = False
+        is_int = False
+        service_l2_id  = None
+        service_int_id = None
         for service in pathcomp_reply.services:
+            if service.service_type == ServiceTypeEnum.SERVICETYPE_L2NM:
+                LOGGER.info("----------------> Is L2")
+                is_l2 = True
+                service_l2_id = service.service_id
+            if service.service_type == ServiceTypeEnum.SERVICETYPE_INT:
+                LOGGER.info("----------------> Is INT")
+                is_int = True
+                service_int_id = service.service_id
             include_service(service.service_id)
             self._add_service_to_executor_cache(service)

-        for connection in pathcomp_reply.connections:
+        if is_l2:
+            cached_conn = pathcomp_reply.connections[0]
+        for connection in pathcomp_reply.connections:
+            ################################################################
+            # The connection object gets modified only for the L2 service
+            con_id = connection.connection_id
+            LOGGER.info(f"++++++++++++++++++++ Removing connection {con_id}")
+            self._executor.delete_connection(con_id)
+            ################################################################
+
+            connection = self.new_l2_connection(cached_conn)
+            LOGGER.info(f"++++++++++++++++++++ Added new connection {connection.connection_id}")
             connection_key = include_connection(connection.connection_id, connection.service_id)
             self._add_connection_to_executor_cache(connection)
             self._executor.get_service(connection.service_id)
             for sub_service_id in connection.sub_service_ids:
                 _,service_key_done = include_service(sub_service_id)
                 self._executor.get_service(sub_service_id)
                 self._dag.add(connection_key, service_key_done)

+        if is_int:
+            connection = self.new_int_connection(service_int_id)
+            connection_key = include_connection(connection.connection_id, connection.service_id)
+            self._add_connection_to_executor_cache(connection)
+            self._executor.get_service(connection.service_id)

         t1 = time.time()
         LOGGER.debug('[compose_from_pathcompreply] elapsed_time: {:f} sec'.format(t1-t0))
diff --git a/src/tests/tfs-eco-7-poc1/descriptors/links.json b/src/tests/tfs-eco-7-poc1/descriptors/links.json
new file mode 100644
index 0000000000000000000000000000000000000000..01504144033dfbdd1ff290ba1272a24f211dd1a1
--- /dev/null
+++ b/src/tests/tfs-eco-7-poc1/descriptors/links.json
@@ -0,0 +1,82 @@
+{
+    "links": [
+        {
+            "link_id": {"link_uuid": {"uuid": "sw1/sw1-4==client/eth1"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "sw1-4"}},
+                {"device_id": {"device_uuid": {"uuid": "client"}}, "endpoint_uuid": {"uuid": "eth1"}}
+            ]
+        },
+        {
+            "link_id": {"link_uuid": {"uuid": "sw1/sw1-1==sw2/sw2-1"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "sw1-1"}},
+                {"device_id": {"device_uuid": {"uuid": "sw2"}}, "endpoint_uuid": {"uuid": "sw2-1"}}
+            ]
+        },
+        {
+            
"link_id": {"link_uuid": {"uuid": "sw1/sw1-2==sw3/sw3-1"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "sw1-2"}}, + {"device_id": {"device_uuid": {"uuid": "sw3"}}, "endpoint_uuid": {"uuid": "sw3-1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw1/sw1-3==sw4/sw4-1"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "sw1-3"}}, + {"device_id": {"device_uuid": {"uuid": "sw4"}}, "endpoint_uuid": {"uuid": "sw4-1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw2/sw2-2==sw5/sw5-1"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw2"}}, "endpoint_uuid": {"uuid": "sw2-2"}}, + {"device_id": {"device_uuid": {"uuid": "sw5"}}, "endpoint_uuid": {"uuid": "sw5-1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw3/sw3-2==sw5/sw5-2"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw3"}}, "endpoint_uuid": {"uuid": "sw3-2"}}, + {"device_id": {"device_uuid": {"uuid": "sw5"}}, "endpoint_uuid": {"uuid": "sw5-2"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw4/sw4-2==sw5/sw5-3"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw4"}}, "endpoint_uuid": {"uuid": "sw4-2"}}, + {"device_id": {"device_uuid": {"uuid": "sw5"}}, "endpoint_uuid": {"uuid": "sw5-3"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw5/sw5-4==server/eth1"}}, "link_type": "LINKTYPE_COPPER", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw5"}}, "endpoint_uuid": {"uuid": "sw5-4"}}, + {"device_id": {"device_uuid": {"uuid": "server"}}, "endpoint_uuid": {"uuid": "eth1"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw1/sw1-5==tfs-sdn-controller/mgmt"}}, "link_type": "LINKTYPE_MANAGEMENT", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw1"}}, "endpoint_uuid": {"uuid": "sw1-5"}}, + {"device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, "endpoint_uuid": {"uuid": "mgmt"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw2/sw2-3==tfs-sdn-controller/mgmt"}}, "link_type": "LINKTYPE_MANAGEMENT", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw2"}}, "endpoint_uuid": {"uuid": "sw2-3"}}, + {"device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, "endpoint_uuid": {"uuid": "mgmt"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw3/sw3-3==tfs-sdn-controller/mgmt"}}, "link_type": "LINKTYPE_MANAGEMENT", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw3"}}, "endpoint_uuid": {"uuid": "sw3-3"}}, + {"device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, "endpoint_uuid": {"uuid": "mgmt"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw4/sw4-3==tfs-sdn-controller/mgmt"}}, "link_type": "LINKTYPE_MANAGEMENT", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw4"}}, "endpoint_uuid": {"uuid": "sw4-3"}}, + {"device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, "endpoint_uuid": {"uuid": "mgmt"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "sw5/sw5-5==tfs-sdn-controller/mgmt"}}, "link_type": "LINKTYPE_MANAGEMENT", "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "sw5"}}, "endpoint_uuid": {"uuid": "sw5-5"}}, + {"device_id": {"device_uuid": {"uuid": "tfs-sdn-controller"}}, "endpoint_uuid": {"uuid": "mgmt"}} + ] + } + ] +} diff --git 
a/src/tests/tfs-eco-7-poc1/descriptors/service-int.json b/src/tests/tfs-eco-7-poc1/descriptors/service-int.json new file mode 100644 index 0000000000000000000000000000000000000000..038b8cbeee4a7624bf7b7dcfc7fa9236dbad293e --- /dev/null +++ b/src/tests/tfs-eco-7-poc1/descriptors/service-int.json @@ -0,0 +1,107 @@ +{ + "services": [ + { + "service_id": { + "context_id": {"context_uuid": {"uuid": "admin"}}, + "service_uuid": {"uuid": "p4-service-int"} + }, + "name": "p4-service-int", + "service_type": "SERVICETYPE_INT", + "service_status": {"service_status": "SERVICESTATUS_PLANNED"}, + "service_endpoint_ids": [ + { + "device_id": {"device_uuid": {"uuid": "sw1"}}, + "endpoint_uuid": {"uuid": "sw1-4"} + }, + { + "device_id": {"device_uuid": {"uuid": "sw5"}}, + "endpoint_uuid": {"uuid": "sw5-4"} + } + ], + "service_config": { + "config_rules": [ + { + "action": "CONFIGACTION_SET", + "custom": { + "resource_key": "/settings", + "resource_value": { + "switch_info": [ + { + "sw1": { + "arch": "v1model", + "dpid": 1, + "mac": "ee:ee:8c:6c:f3:2c", + "ip": "192.168.5.139", + "int_port": { + "port_id": 5, + "port_type": "host" + } + } + }, + { + "sw2": { + "arch": "v1model", + "dpid": 2, + "mac": "ee:ee:8c:6c:f3:2c", + "ip": "192.168.5.139", + "int_port": { + "port_id": 3, + "port_type": "host" + } + } + }, + { + "sw3": { + "arch": "v1model", + "dpid": 3, + "mac": "ee:ee:8c:6c:f3:2c", + "ip": "192.168.5.139", + "int_port": { + "port_id": 3, + "port_type": "host" + } + } + }, + { + "sw4": { + "arch": "v1model", + "dpid": 4, + "mac": "ee:ee:8c:6c:f3:2c", + "ip": "192.168.5.139", + "int_port": { + "port_id": 3, + "port_type": "host" + } + } + }, + { + "sw5": { + "arch": "v1model", + "dpid": 5, + "mac": "ee:ee:8c:6c:f3:2c", + "ip": "192.168.5.139", + "int_port": { + "port_id": 5, + "port_type": "host" + } + } + } + ], + "int_collector_info": { + "iface": "eth0", + "mac": "5a:34:50:3d:1a:8b", + "ip": "192.168.5.250", + "port": 12345, + "vlan_id": 4094, + "duration_sec": 10000, + "interval_sec": 1 + } + } + } + } + ] + }, + "service_constraints": [] + } + ] +} diff --git a/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_deprovision_int.py b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_deprovision_int.py new file mode 100644 index 0000000000000000000000000000000000000000..61ec2a71c2240e98c9f430d66f5558916448b6f5 --- /dev/null +++ b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_deprovision_int.py @@ -0,0 +1,85 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
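Before the deprovision test below can run, the INT service from service-int.json above must already be in place. A rough sketch of how that descriptor gets loaded, mirroring the provision test further down (`context_client`, `device_client`, and `service_client` are the pytest fixtures used throughout these tests):

```python
# Sketch: loading the INT service descriptor (path relative to the test package).
from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results

descriptor_loader = DescriptorLoader(
    descriptors_file='descriptors/service-int.json',
    context_client=context_client, device_client=device_client, service_client=service_client,
)
results = descriptor_loader.process()
check_descriptor_load_results(results, descriptor_loader)
```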
+ +import logging +from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum +from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Service import json_service_id +from context.client.ContextClient import ContextClient +from service.client.ServiceClient import ServiceClient +from tests.Fixtures import context_client, service_client # pylint: disable=unused-import +from tests.tools.test_tools_p4 import ADMIN_CONTEXT_ID, identify_number_of_p4_devices, get_number_of_rules, verify_active_service_type + +LOGGER = logging.getLogger(__name__) +LOGGER.setLevel(logging.DEBUG) + +DEV_NB = 8 +P4_DEV_NB = 5 +ACTIVE_P4_DEV_NB = 5 + +INT_RULES = 19 + +def test_service_deletion_int( + context_client : ContextClient, # pylint: disable=redefined-outer-name + service_client : ServiceClient # pylint: disable=redefined-outer-name +) -> None: + # Get the current number of devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response))) + + # Total devices + dev_nb = len(response.devices) + assert dev_nb == DEV_NB + + # P4 devices + p4_dev_nb = identify_number_of_p4_devices(response.devices) + assert p4_dev_nb == P4_DEV_NB + + # Get the current number of rules in the P4 devices + p4_rules_before_deletion = get_number_of_rules(response.devices) + + # Get the current number of services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_before_deletion = len(response.services) + assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_INT) + + for service in response.services: + # Ignore services of other types + if service.service_type != ServiceTypeEnum.SERVICETYPE_INT: + continue + + service_id = service.service_id + assert service_id + + service_uuid = service_id.service_uuid.uuid + context_uuid = service_id.context_id.context_uuid.uuid + assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE + + # Delete INT service + service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid)))) + + # Get an updated view of the services + response = context_client.ListServices(ADMIN_CONTEXT_ID) + services_nb_after_deletion = len(response.services) + # assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one new service must be deleted" + + # Get an updated view of the devices + response = context_client.ListDevices(ADMIN_CONTEXT_ID) + p4_rules_after_deletion = get_number_of_rules(response.devices) + + rules_diff = p4_rules_before_deletion - p4_rules_after_deletion + + assert p4_rules_after_deletion < p4_rules_before_deletion, "INT service must contain some rules" + assert rules_diff == ACTIVE_P4_DEV_NB * INT_RULES, "INT service must contain {} rules per device".format(INT_RULES) diff --git a/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_deprovision_l2.py b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_deprovision_l2.py new file mode 100644 index 0000000000000000000000000000000000000000..5ccbe933da58b4dc6cc40d40665881901de50d7a --- /dev/null +++ b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_deprovision_l2.py @@ -0,0 +1,85 @@ +# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import ServiceId, ServiceStatusEnum, ServiceTypeEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Service import json_service_id
+from context.client.ContextClient import ContextClient
+from service.client.ServiceClient import ServiceClient
+from tests.Fixtures import context_client, service_client # pylint: disable=unused-import
+from tests.tools.test_tools_p4 import ADMIN_CONTEXT_ID, identify_number_of_p4_devices, get_number_of_rules, verify_active_service_type
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DEV_NB = 8
+P4_DEV_NB = 5
+ACTIVE_P4_DEV_NB = 3
+
+L2_RULES = 10
+
+def test_service_deletion_l2(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    service_client : ServiceClient  # pylint: disable=redefined-outer-name
+) -> None:
+    # Get the current number of devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response)))
+
+    # Total devices
+    dev_nb = len(response.devices)
+    assert dev_nb == DEV_NB
+
+    # P4 devices
+    p4_dev_nb = identify_number_of_p4_devices(response.devices)
+    assert p4_dev_nb == P4_DEV_NB
+
+    # Get the current number of rules in the P4 devices
+    p4_rules_before_deletion = get_number_of_rules(response.devices)
+
+    # Get the current number of services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_before_deletion = len(response.services)
+    assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L2NM)
+
+    for service in response.services:
+        # Ignore services of other types
+        if service.service_type != ServiceTypeEnum.SERVICETYPE_L2NM:
+            continue
+
+        service_id = service.service_id
+        assert service_id
+
+        service_uuid = service_id.service_uuid.uuid
+        context_uuid = service_id.context_id.context_uuid.uuid
+        assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+
+        # Delete L2 service
+        service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))))
+
+    # Get an updated view of the services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_after_deletion = len(response.services)
+    # assert services_nb_after_deletion == services_nb_before_deletion - 1, "Exactly one service must be deleted"
+
+    # Get an updated view of the devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    p4_rules_after_deletion = get_number_of_rules(response.devices)
+
+    rules_diff = p4_rules_before_deletion - p4_rules_after_deletion
+
+    assert p4_rules_after_deletion < p4_rules_before_deletion, "Deleting the L2 service must remove some rules"
+    assert rules_diff == ACTIVE_P4_DEV_NB * L2_RULES, "Deleting the L2 service must remove {} rules per device".format(L2_RULES)
diff --git a/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_provision_int.py b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_provision_int.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dabe34b93d871537e176102ef76cf2054497a3c
--- /dev/null
+++ b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_provision_int.py
@@ -0,0 +1,90 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import logging
+from common.proto.context_pb2 import ServiceTypeEnum
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import
+from tests.tools.test_tools_p4 import ADMIN_CONTEXT_ID, identify_number_of_p4_devices, get_number_of_rules, verify_active_service_type
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+TEST_PATH = os.path.join(
+    os.path.dirname(os.path.dirname(
+        os.path.abspath(__file__)
+    )) + '/descriptors')
+assert os.path.exists(TEST_PATH), "Invalid path to tests"
+
+DESC_FILE_SERVICE_P4_INT = os.path.join(TEST_PATH, 'service-int.json')
+assert os.path.exists(DESC_FILE_SERVICE_P4_INT),\
+    "Invalid path to the INT service descriptor"
+
+DEV_NB = 8
+P4_DEV_NB = 5
+ACTIVE_P4_DEV_NB = 5
+
+INT_RULES = 19
+
+def test_service_creation_int(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+    service_client : ServiceClient  # pylint: disable=redefined-outer-name
+) -> None:
+    # Get the current number of services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_before = len(response.services)
+
+    # Get the current number of devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response)))
+
+    # Total devices
+    dev_nb = len(response.devices)
+    assert dev_nb == DEV_NB
+
+    # P4 devices
+    p4_dev_nb = identify_number_of_p4_devices(response.devices)
+    assert p4_dev_nb == P4_DEV_NB
+
+    # Get the current number of rules in the P4 devices
+    p4_rules_before = get_number_of_rules(response.devices)
+
+    # Load service
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESC_FILE_SERVICE_P4_INT,
+        context_client=context_client, device_client=device_client, service_client=service_client
+    )
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+
+    # Get an updated view of the services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_after = len(response.services)
+    # assert services_nb_after == services_nb_before + 1, "Exactly one new service must be in place"
+    assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_INT)
+
+    # Get an updated view of the devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    p4_rules_after = get_number_of_rules(response.devices)
+
+    rules_diff = p4_rules_after - p4_rules_before
+
+    assert p4_rules_after > p4_rules_before, "INT service must install some rules"
+    assert rules_diff == ACTIVE_P4_DEV_NB * INT_RULES, "INT service must install {} rules per device".format(INT_RULES)
diff --git a/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_provision_l2.py b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_provision_l2.py
new file mode 100644
index 0000000000000000000000000000000000000000..39c4dde085b5207138c9010be2739e650c183d40
--- /dev/null
+++ b/src/tests/tfs-eco-7-poc1/tests-service/test_functional_service_provision_l2.py
@@ -0,0 +1,90 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import logging
+from common.proto.context_pb2 import ServiceStatusEnum, ServiceTypeEnum
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import
+from tests.tools.test_tools_p4 import ADMIN_CONTEXT_ID, identify_number_of_p4_devices, get_number_of_rules, verify_active_service_type
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+DEV_NB = 8
+P4_DEV_NB = 5
+ACTIVE_P4_DEV_NB = 3
+
+L2_RULES = 10
+
+TEST_PATH = os.path.join(
+    os.path.dirname(os.path.dirname(
+        os.path.abspath(__file__)
+    )) + '/descriptors')
+assert os.path.exists(TEST_PATH), "Invalid path to tests"
+
+DESC_FILE_SERVICE_P4_L2_SIMPLE = os.path.join(TEST_PATH, 'service-l2-simple.json')
+assert os.path.exists(DESC_FILE_SERVICE_P4_L2_SIMPLE),\
+    "Invalid path to the L2 simple service descriptor"
+
+def test_service_creation_l2(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,   # pylint: disable=redefined-outer-name
+    service_client : ServiceClient  # pylint: disable=redefined-outer-name
+) -> None:
+    # Get the current number of services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_before = len(response.services)
+
+    # Get the current number of devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response)))
+
+    # Total devices
+    dev_nb = len(response.devices)
+    assert dev_nb == DEV_NB
+
+    # P4 devices
+    p4_dev_nb = identify_number_of_p4_devices(response.devices)
+    assert p4_dev_nb == P4_DEV_NB
+
+    # Get the current number of rules in the P4 devices
+    p4_rules_before = get_number_of_rules(response.devices)
+
+    # Load service
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESC_FILE_SERVICE_P4_L2_SIMPLE,
+        context_client=context_client, device_client=device_client, service_client=service_client
+    )
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+
+    # Get an updated view of the services
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    services_nb_after = len(response.services)
+    # assert services_nb_after == services_nb_before + 1, "Exactly one new service must be in place"
+    assert verify_active_service_type(response.services, ServiceTypeEnum.SERVICETYPE_L2NM)
+
+    # Get an updated view of the devices
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    p4_rules_after = get_number_of_rules(response.devices)
+
+    rules_diff = p4_rules_after - p4_rules_before
+
+    assert p4_rules_after > p4_rules_before, "L2 service must install some rules"
+    assert rules_diff == ACTIVE_P4_DEV_NB * L2_RULES, "L2 service must install {} rules per device".format(L2_RULES)
diff --git a/src/tests/tfs-eco-7-poc1/tests-setup/test_functional_purge.py b/src/tests/tfs-eco-7-poc1/tests-setup/test_functional_purge.py
new file mode 100644
index 0000000000000000000000000000000000000000..c721ae46001a3d997225e53980249ed71e884a2a
--- /dev/null
+++ b/src/tests/tfs-eco-7-poc1/tests-setup/test_functional_purge.py
@@ -0,0 +1,83 @@
+# Copyright 2022-2024 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from common.proto.context_pb2 import ServiceId, DeviceId, LinkId, ServiceStatusEnum
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from common.tools.object_factory.Device import json_device_id
+from common.tools.object_factory.Service import json_service_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from service.client.ServiceClient import ServiceClient
+from tests.Fixtures import context_client, device_client, service_client # pylint: disable=unused-import
+from tests.tools.test_tools_p4 import ADMIN_CONTEXT_ID
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+def test_clean_services(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    service_client : ServiceClient  # pylint: disable=redefined-outer-name
+) -> None:
+    response = context_client.ListServices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Services[{:d}] = {:s}'.format(len(response.services), grpc_message_to_json_string(response)))
+
+    for service in response.services:
+        service_id = service.service_id
+        assert service_id
+
+        service_uuid = service_id.service_uuid.uuid
+        context_uuid = service_id.context_id.context_uuid.uuid
+        # assert service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
+
+        # Delete service
+        service_client.DeleteService(ServiceId(**json_service_id(service_uuid, json_context_id(context_uuid))))
+
+def test_clean_links(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+) -> None:
+    response = context_client.ListLinks(ADMIN_CONTEXT_ID)
+
+    for link in response.links:
+        link_id = link.link_id
+
+        # Delete link (link_id is already a LinkId message, so pass it as-is)
+        context_client.RemoveLink(link_id)
+
+def test_clean_devices(
+    context_client : ContextClient, # pylint: disable=redefined-outer-name
+    device_client : DeviceClient    # pylint: disable=redefined-outer-name
+) -> None:
+    response = context_client.ListDevices(ADMIN_CONTEXT_ID)
+    LOGGER.warning('Devices[{:d}] = {:s}'.format(len(response.devices), grpc_message_to_json_string(response)))
+
+    for device in response.devices:
+        device_uuid = device.device_id.device_uuid.uuid
+        device_json = json_device_id(device_uuid)
+
+        # Delete device
+        device_client.DeleteDevice(DeviceId(**device_json))
+
+def test_clean_context(
+    context_client : ContextClient # pylint: disable=redefined-outer-name
+) -> None:
+    # Remove the remaining topologies, then the admin context itself
+    response = context_client.ListTopologies(ADMIN_CONTEXT_ID)
+
+    for topology in response.topologies:
+        topology_id = topology.topology_id
+        response = context_client.RemoveTopology(topology_id)
+
+    response = context_client.RemoveContext(ADMIN_CONTEXT_ID)
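
Note on the shared test tooling: every functional test above imports `ADMIN_CONTEXT_ID`, `identify_number_of_p4_devices`, `get_number_of_rules` and `verify_active_service_type` from `tests.tools.test_tools_p4`, a module this diff does not include. The following is a minimal sketch of what those helpers could look like, inferred purely from the call sites above; the use of `DEFAULT_CONTEXT_NAME` and the exact driver/status checks are assumptions, not the module's actual implementation.

```
# Hypothetical sketch of tests/tools/test_tools_p4.py, reconstructed from call sites.
from common.Constants import DEFAULT_CONTEXT_NAME
from common.proto.context_pb2 import ContextId, DeviceDriverEnum, ServiceStatusEnum
from common.tools.object_factory.Context import json_context_id

# ContextId of the default "admin" context used by all the tests above (assumption).
ADMIN_CONTEXT_ID = ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))

def identify_number_of_p4_devices(devices):
    # Count the devices exposing the P4 device driver.
    return sum(
        1 for device in devices
        if DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers
    )

def get_number_of_rules(devices):
    # Sum the config rules currently installed across all P4 devices.
    return sum(
        len(device.device_config.config_rules)
        for device in devices
        if DeviceDriverEnum.DEVICEDRIVER_P4 in device.device_drivers
    )

def verify_active_service_type(services, service_type):
    # True when at least one ACTIVE service of the requested type exists.
    return any(
        service.service_type == service_type and
        service.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_ACTIVE
        for service in services
    )
```

Under these assumptions, the rule-accounting asserts read naturally: provisioning the INT service must raise the total rule count by `ACTIVE_P4_DEV_NB * INT_RULES` (5 devices × 19 rules), the L2 service by `ACTIVE_P4_DEV_NB * L2_RULES` (3 devices × 10 rules), and deprovisioning must lower each count by the same amount.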