From a76319d6d0f54bc5c3c6aeb6138c9493dc7c6c03 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 16:32:18 +0000 Subject: [PATCH 01/23] OECC/PSC'22: - Improved descriptors - Corrected dump_logs script --- ...ain-service.json => inter-domain-slice.json} | 17 ++++++++--------- src/tests/oeccpsc22/dump_logs.sh | 4 ---- 2 files changed, 8 insertions(+), 13 deletions(-) rename src/tests/oeccpsc22/descriptors/{inter-domain-service.json => inter-domain-slice.json} (79%) diff --git a/src/tests/oeccpsc22/descriptors/inter-domain-service.json b/src/tests/oeccpsc22/descriptors/inter-domain-slice.json similarity index 79% rename from src/tests/oeccpsc22/descriptors/inter-domain-service.json rename to src/tests/oeccpsc22/descriptors/inter-domain-slice.json index 4b53c433a..b1d71858f 100644 --- a/src/tests/oeccpsc22/descriptors/inter-domain-service.json +++ b/src/tests/oeccpsc22/descriptors/inter-domain-slice.json @@ -1,18 +1,17 @@ { - "services": [ + "slices": [ { - "service_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "service_uuid": {"uuid": "idc-l2-svc"}}, - "service_type": 2, - "service_status": {"service_status": 1}, - "service_endpoint_ids": [ - {"device_id":{"device_uuid":{"uuid":"DC1"}},"endpoint_uuid":{"uuid":"int"}}, - {"device_id":{"device_uuid":{"uuid":"DC2"}},"endpoint_uuid":{"uuid":"int"}} + "slice_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "slice_uuid": {"uuid": "idc-l2-slice"}}, + "slice_status": {"slice_status": 1}, + "slice_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "int"}}, + {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "int"}} ], - "service_constraints": [ + "slice_constraints": [ {"sla_capacity": {"capacity_gbps": 10.0}}, {"sla_latency": {"e2e_latency_ms": 15.2}} ], - "service_config": {"config_rules": [ + "slice_config": {"config_rules": [ {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"mtu": 1512, "vlan_id": 300}}}, {"action": 1, "custom": {"resource_key": "/device[R1@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}}, {"action": 1, "custom": {"resource_key": "/device[R1@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}}, diff --git a/src/tests/oeccpsc22/dump_logs.sh b/src/tests/oeccpsc22/dump_logs.sh index 3a2e51a56..ae02646aa 100755 --- a/src/tests/oeccpsc22/dump_logs.sh +++ b/src/tests/oeccpsc22/dump_logs.sh @@ -24,9 +24,7 @@ kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/d kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log -kubectl --namespace tfs-dom1 logs deployment/computeservice server > tmp/exec/dom1/compute.log kubectl --namespace tfs-dom1 logs deployment/interdomainservice server > tmp/exec/dom1/interdomain.log -kubectl --namespace tfs-dom1 logs deployment/monitoringservice server > tmp/exec/dom1/monitoring.log printf "\n" echo "Collecting logs for Domain 2..." 
@@ -37,9 +35,7 @@ kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/d kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log -kubectl --namespace tfs-dom2 logs deployment/computeservice server > tmp/exec/dom2/compute.log kubectl --namespace tfs-dom2 logs deployment/interdomainservice server > tmp/exec/dom2/interdomain.log -kubectl --namespace tfs-dom2 logs deployment/monitoringservice server > tmp/exec/dom2/monitoring.log printf "\n" echo "Done!" -- GitLab From 7137a5408bf0e81067b715fe1b8168204a61e9b9 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 16:34:10 +0000 Subject: [PATCH 02/23] Common: - Added dockerignore file to reduce size of filesystem context sent to docker build --- .dockerignore | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..2e001ed43 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,17 @@ +# Avoid including these folders when building the components +.git/ +.gitlab/ +.vscode/ +coverage/ +data/ +deploy/ +manifests/ +hackfest/ +scripts/ +tmp/ + +ecoc22/ +nfvsdn22/ +oeccpsc22/ +ofc22/ +ofc23/ -- GitLab From ecf1f4ccf58dc6f9fead8b096688eb0001814a4e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 16:36:09 +0000 Subject: [PATCH 03/23] Interdomain component: - Added filter to skip undesired device events - Corrected close of DLT connector client --- src/interdomain/service/RemoteDomainClients.py | 1 + .../service/topology_abstractor/TopologyAbstractor.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/interdomain/service/RemoteDomainClients.py b/src/interdomain/service/RemoteDomainClients.py index e28176ef4..d60450a18 100644 --- a/src/interdomain/service/RemoteDomainClients.py +++ b/src/interdomain/service/RemoteDomainClients.py @@ -69,6 +69,7 @@ class RemoteDomainClients(threading.Thread): if not isinstance(event, DeviceEvent): continue LOGGER.info('Processing Event({:s})...'.format(grpc_message_to_json_string(event))) domain_data = get_domain_data(self.context_client, event) + if domain_data is None: continue domain_name, domain_address, domain_port = domain_data try: self.add_peer(domain_name, domain_address, domain_port) diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py index 20b186f30..40b40ac66 100644 --- a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py +++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py @@ -295,4 +295,4 @@ class TopologyAbstractor(threading.Thread): LOGGER.warning('Unsupported Event({:s})'.format(grpc_message_to_json_string(event))) dlt_record_sender.commit() - dlt_connector_client.close() + if dlt_connector_client is not None: dlt_connector_client.close() -- GitLab From 53dfe82885dc853589db1a16160e1333954ae2a2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:27:37 +0000 Subject: [PATCH 04/23] Manifests: - Adapted for the OECC/PSC tests (to be rolled back) --- manifests/contextservice.yaml | 48 +++++++++++++-------------- manifests/deviceservice.yaml | 4 +-- manifests/interdomainservice.yaml | 4 +-- manifests/pathcompservice.yaml | 54 
+++++++++++++++---------------- manifests/serviceservice.yaml | 48 +++++++++++++-------------- manifests/sliceservice.yaml | 48 +++++++++++++-------------- manifests/webuiservice.yaml | 10 +++--- 7 files changed, 108 insertions(+), 108 deletions(-) diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml index 96735bf5f..db592cffa 100644 --- a/manifests/contextservice.yaml +++ b/manifests/contextservice.yaml @@ -40,7 +40,7 @@ spec: - name: MB_BACKEND value: "nats" - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" envFrom: - secretRef: name: crdb-data @@ -54,7 +54,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:1010"] resources: requests: - cpu: 250m + cpu: 125m memory: 128Mi limits: cpu: 1000m @@ -79,25 +79,25 @@ spec: protocol: TCP port: 9192 targetPort: 9192 ---- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: contextservice-hpa -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: contextservice - minReplicas: 1 - maxReplicas: 20 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 - #behavior: - # scaleDown: - # stabilizationWindowSeconds: 30 +#--- +#apiVersion: autoscaling/v2 +#kind: HorizontalPodAutoscaler +#metadata: +# name: contextservice-hpa +#spec: +# scaleTargetRef: +# apiVersion: apps/v1 +# kind: Deployment +# name: contextservice +# minReplicas: 1 +# maxReplicas: 20 +# metrics: +# - type: Resource +# resource: +# name: cpu +# target: +# type: Utilization +# averageUtilization: 80 +# #behavior: +# # scaleDown: +# # stabilizationWindowSeconds: 30 diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 22c0f5f9d..530275acb 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -39,7 +39,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] @@ -48,7 +48,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:2020"] resources: requests: - cpu: 250m + cpu: 125m memory: 128Mi limits: cpu: 1000m diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index 067f94327..378daef88 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -35,7 +35,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10010"] @@ -44,7 +44,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:10010"] resources: requests: - cpu: 250m + cpu: 125m memory: 64Mi limits: cpu: 1000m diff --git a/manifests/pathcompservice.yaml b/manifests/pathcompservice.yaml index 3ba12750b..a1ff81fba 100644 --- a/manifests/pathcompservice.yaml +++ b/manifests/pathcompservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10020"] @@ -45,11 +45,11 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:10020"] resources: requests: - cpu: 50m - memory: 64Mi + cpu: 125m + memory: 128Mi limits: - cpu: 500m - memory: 512Mi + cpu: 1000m + memory: 1024Mi - name: backend image: labs.etsi.org:5050/tfs/controller/pathcomp-backend:latest imagePullPolicy: Always @@ -98,25 +98,25 @@ spec: protocol: TCP port: 9192 targetPort: 9192 ---- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: pathcompservice-hpa -spec: - scaleTargetRef: - apiVersion: apps/v1 - 
kind: Deployment - name: pathcompservice - minReplicas: 1 - maxReplicas: 20 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 - #behavior: - # scaleDown: - # stabilizationWindowSeconds: 30 +#--- +#apiVersion: autoscaling/v2 +#kind: HorizontalPodAutoscaler +#metadata: +# name: pathcompservice-hpa +#spec: +# scaleTargetRef: +# apiVersion: apps/v1 +# kind: Deployment +# name: pathcompservice +# minReplicas: 1 +# maxReplicas: 20 +# metrics: +# - type: Resource +# resource: +# name: cpu +# target: +# type: Utilization +# averageUtilization: 80 +# #behavior: +# # scaleDown: +# # stabilizationWindowSeconds: 30 diff --git a/manifests/serviceservice.yaml b/manifests/serviceservice.yaml index 7d7bdaa4e..26cce6a77 100644 --- a/manifests/serviceservice.yaml +++ b/manifests/serviceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:3030"] @@ -45,7 +45,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:3030"] resources: requests: - cpu: 250m + cpu: 125m memory: 128Mi limits: cpu: 1000m @@ -70,25 +70,25 @@ spec: protocol: TCP port: 9192 targetPort: 9192 ---- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: serviceservice-hpa -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: serviceservice - minReplicas: 1 - maxReplicas: 20 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 - #behavior: - # scaleDown: - # stabilizationWindowSeconds: 30 +#--- +#apiVersion: autoscaling/v2 +#kind: HorizontalPodAutoscaler +#metadata: +# name: serviceservice-hpa +#spec: +# scaleTargetRef: +# apiVersion: apps/v1 +# kind: Deployment +# name: serviceservice +# minReplicas: 1 +# maxReplicas: 20 +# metrics: +# - type: Resource +# resource: +# name: cpu +# target: +# type: Utilization +# averageUtilization: 80 +# #behavior: +# # scaleDown: +# # stabilizationWindowSeconds: 30 diff --git a/manifests/sliceservice.yaml b/manifests/sliceservice.yaml index e7e5c1604..8abce1e1e 100644 --- a/manifests/sliceservice.yaml +++ b/manifests/sliceservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: SLICE_GROUPING value: "DISABLE" envFrom: @@ -50,7 +50,7 @@ spec: command: ["/bin/grpc_health_probe", "-addr=:4040"] resources: requests: - cpu: 250m + cpu: 125m memory: 128Mi limits: cpu: 1000m @@ -75,25 +75,25 @@ spec: protocol: TCP port: 9192 targetPort: 9192 ---- -apiVersion: autoscaling/v2 -kind: HorizontalPodAutoscaler -metadata: - name: sliceservice-hpa -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: sliceservice - minReplicas: 1 - maxReplicas: 20 - metrics: - - type: Resource - resource: - name: cpu - target: - type: Utilization - averageUtilization: 80 - #behavior: - # scaleDown: - # stabilizationWindowSeconds: 30 +#--- +#apiVersion: autoscaling/v2 +#kind: HorizontalPodAutoscaler +#metadata: +# name: sliceservice-hpa +#spec: +# scaleTargetRef: +# apiVersion: apps/v1 +# kind: Deployment +# name: sliceservice +# minReplicas: 1 +# maxReplicas: 20 +# metrics: +# - type: Resource +# resource: +# name: cpu +# target: +# type: Utilization +# averageUtilization: 80 +# #behavior: +# # scaleDown: +# # stabilizationWindowSeconds: 30 diff --git a/manifests/webuiservice.yaml b/manifests/webuiservice.yaml index b6ddfc0a9..a061f8e13 100644 --- 
a/manifests/webuiservice.yaml +++ b/manifests/webuiservice.yaml @@ -39,7 +39,7 @@ spec: - containerPort: 8004 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: WEBUISERVICE_SERVICE_BASEURL_HTTP value: "/webui/" readinessProbe: @@ -56,11 +56,11 @@ spec: timeoutSeconds: 1 resources: requests: - cpu: 50m - memory: 64Mi + cpu: 125m + memory: 128Mi limits: - cpu: 500m - memory: 512Mi + cpu: 1000m + memory: 1024Mi - name: grafana image: grafana/grafana:8.5.22 imagePullPolicy: IfNotPresent -- GitLab From 1a46ee953dccf301deaef71d67973d5ea981c9c5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:28:36 +0000 Subject: [PATCH 05/23] Slice component: - Corrected method used to detect inter-domain slices - Added delete inter-domain Slice --- src/slice/service/SliceServiceServicerImpl.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index f91c55e28..e87ee0f97 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -18,7 +18,7 @@ from common.proto.context_pb2 import ( Empty, Service, ServiceId, ServiceStatusEnum, ServiceTypeEnum, Slice, SliceId, SliceStatusEnum) from common.proto.slice_pb2_grpc import SliceServiceServicer from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method -from common.tools.context_queries.InterDomain import is_multi_domain +from common.tools.context_queries.InterDomain import is_inter_domain #, is_multi_domain from common.tools.context_queries.Slice import get_slice_by_id from common.tools.grpc.ConfigRules import copy_config_rules from common.tools.grpc.Constraints import copy_constraints @@ -74,7 +74,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): #changes = deepdiff.DeepDiff(json_current_slice, json_updated_slice) #LOGGER.info('changes = {:s}'.format(str(changes))) - if is_multi_domain(context_client, slice_with_uuids.slice_endpoint_ids): + if is_inter_domain(context_client, slice_with_uuids.slice_endpoint_ids): interdomain_client = InterdomainClient() slice_id = interdomain_client.RequestSlice(slice_with_uuids) slice_ = context_client.GetSlice(slice_id) @@ -203,10 +203,11 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client.close() return Empty() - if is_multi_domain(context_client, _slice.slice_endpoint_ids): - #interdomain_client = InterdomainClient() - #slice_id = interdomain_client.DeleteSlice(request) - raise NotImplementedError('Delete inter-domain slice') + if is_inter_domain(context_client, _slice.slice_endpoint_ids): + interdomain_client = InterdomainClient() + slice_id = interdomain_client.DeleteSlice(request) + #raise NotImplementedError('Delete inter-domain slice') + interdomain_client.close() else: current_slice = Slice() current_slice.CopyFrom(_slice) -- GitLab From 3afe3f8bebd6a1d42a91bb25a176c170c109738c Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:28:58 +0000 Subject: [PATCH 06/23] Proto: - Added to Interdomain method to delete slice --- proto/interdomain.proto | 1 + 1 file changed, 1 insertion(+) diff --git a/proto/interdomain.proto b/proto/interdomain.proto index 3e44fb447..ca6a64b19 100644 --- a/proto/interdomain.proto +++ b/proto/interdomain.proto @@ -25,4 +25,5 @@ service InterdomainService { rpc CreateSliceAndAddToCatalog(context.Slice ) returns (context.Slice ) {} rpc OrderSliceWithSLA (context.Slice) returns (context.SliceId) {} // If slice with SLA already 
exists, returns slice. If not, it creates it. rpc UpdateSlice (context.Slice ) returns (context.Slice ) {} + rpc DeleteSlice (context.SliceId ) returns (context.Empty ) {} } -- GitLab From 10b0930e7120e936e60cc141d00f73f071e39d84 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:29:51 +0000 Subject: [PATCH 07/23] Interdomain component: - Added to client method to delete slice - Corrected computation of slice owner --- src/interdomain/client/InterdomainClient.py | 37 ++++++++++++++++----- src/interdomain/service/Tools.py | 26 +++++++++------ 2 files changed, 44 insertions(+), 19 deletions(-) diff --git a/src/interdomain/client/InterdomainClient.py b/src/interdomain/client/InterdomainClient.py index f5631de61..ade3ef207 100644 --- a/src/interdomain/client/InterdomainClient.py +++ b/src/interdomain/client/InterdomainClient.py @@ -15,7 +15,7 @@ import grpc, logging from common.Constants import ServiceNameEnum from common.Settings import get_service_host, get_service_port_grpc -from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatus, TeraFlowController +from common.proto.context_pb2 import AuthenticationResult, Empty, Slice, SliceId, SliceStatus, TeraFlowController from common.proto.interdomain_pb2_grpc import InterdomainServiceStub from common.tools.client.RetryDecorator import retry, delay_exponential from common.tools.grpc.Tools import grpc_message_to_json_string @@ -45,13 +45,6 @@ class InterdomainClient: self.channel = None self.stub = None - @RETRY_DECORATOR - def RequestSlice(self, request : Slice) -> SliceId: - LOGGER.debug('RequestSlice request: {:s}'.format(grpc_message_to_json_string(request))) - response = self.stub.RequestSlice(request) - LOGGER.debug('RequestSlice result: {:s}'.format(grpc_message_to_json_string(response))) - return response - @RETRY_DECORATOR def Authenticate(self, request : TeraFlowController) -> AuthenticationResult: LOGGER.debug('Authenticate request: {:s}'.format(grpc_message_to_json_string(request))) @@ -59,6 +52,13 @@ class InterdomainClient: LOGGER.debug('Authenticate result: {:s}'.format(grpc_message_to_json_string(response))) return response + @RETRY_DECORATOR + def RequestSlice(self, request : Slice) -> SliceId: + LOGGER.debug('RequestSlice request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.RequestSlice(request) + LOGGER.debug('RequestSlice result: {:s}'.format(grpc_message_to_json_string(response))) + return response + @RETRY_DECORATOR def LookUpSlice(self, request : Slice) -> SliceId: LOGGER.debug('LookUpSlice request: {:s}'.format(grpc_message_to_json_string(request))) @@ -79,3 +79,24 @@ class InterdomainClient: response = self.stub.CreateSliceAndAddToCatalog(request) LOGGER.debug('CreateSliceAndAddToCatalog result: {:s}'.format(grpc_message_to_json_string(response))) return response + + @RETRY_DECORATOR + def OrderSliceWithSLA(self, request : Slice) -> SliceId: + LOGGER.debug('OrderSliceWithSLA request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.OrderSliceWithSLA(request) + LOGGER.debug('OrderSliceWithSLA result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def UpdateSlice(self, request : Slice) -> Slice: + LOGGER.debug('UpdateSlice request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.UpdateSlice(request) + LOGGER.debug('UpdateSlice result: {:s}'.format(grpc_message_to_json_string(response))) + return response + + @RETRY_DECORATOR + def DeleteSlice(self, 
request : SliceId) -> Empty: + LOGGER.debug('DeleteSlice request: {:s}'.format(grpc_message_to_json_string(request))) + response = self.stub.DeleteSlice(request) + LOGGER.debug('DeleteSlice result: {:s}'.format(grpc_message_to_json_string(response))) + return response diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py index 609dc6e07..94db60ed2 100644 --- a/src/interdomain/service/Tools.py +++ b/src/interdomain/service/Tools.py @@ -32,19 +32,23 @@ def compute_slice_owner( ) -> Optional[str]: traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains} - existing_topology_ids = context_client.ListTopologyIds(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) - existing_topology_uuids = { - topology_id.topology_uuid.uuid for topology_id in existing_topology_ids.topology_ids - } - existing_topology_uuids.discard(DEFAULT_TOPOLOGY_NAME) - existing_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME) - - candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids) + existing_topologies = context_client.ListTopologies(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) + existing_topology_uuids_names = set() + DISCARD_TOPOLOGY_NAMES = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME} + for topology in existing_topologies.topologies: + topology_uuid = topology.topology_id.topology_uuid.uuid + if topology_uuid in DISCARD_TOPOLOGY_NAMES: continue + topology_name = topology.name + if topology_name in DISCARD_TOPOLOGY_NAMES: continue + existing_topology_uuids_names.add(topology_uuid) + existing_topology_uuids_names.add(topology_name) + + candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids_names) if len(candidate_owner_uuids) != 1: data = { - 'traversed_domain_uuids' : [td_uuid for td_uuid in traversed_domain_uuids ], - 'existing_topology_uuids': [et_uuid for et_uuid in existing_topology_uuids], - 'candidate_owner_uuids' : [co_uuid for co_uuid in candidate_owner_uuids ], + 'traversed_domain_uuids' : [td_uuid for td_uuid in traversed_domain_uuids ], + 'existing_topology_uuids_names': [et_uuid for et_uuid in existing_topology_uuids_names], + 'candidate_owner_uuids' : [co_uuid for co_uuid in candidate_owner_uuids ], } LOGGER.warning('Unable to identify slice owner: {:s}'.format(json.dumps(data))) return None -- GitLab From cc2a93dbf95cc1b25052577283821a2cb71523f5 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:31:41 +0000 Subject: [PATCH 08/23] PathComp component - Frontend: - Deactivated use of interdomain topology - Tuned devicetype NETWORK to always require creation of a sub-service --- .../service/PathCompServiceServicerImpl.py | 22 ++++++++++--------- .../algorithms/tools/ResourceGroups.py | 2 +- .../service/algorithms/tools/ServiceTypes.py | 5 +++++ 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py index 52f1cd3d5..784a09e32 100644 --- a/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py +++ b/src/pathcomp/frontend/service/PathCompServiceServicerImpl.py @@ -46,16 +46,18 @@ class PathCompServiceServicerImpl(PathCompServiceServicer): context_client = ContextClient() context_id = json_context_id(DEFAULT_CONTEXT_NAME) - if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids): - #devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) - 
#links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) - topology_id = json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id) - else: - # TODO: improve filtering of devices and links - # TODO: add contexts, topologies, and membership of devices/links in topologies - #devices = context_client.ListDevices(Empty()) - #links = context_client.ListLinks(Empty()) - topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id) + # TODO: improve definition of topologies; for interdomain the current topology design might be not convenient + #if (len(request.services) == 1) and is_inter_domain(context_client, request.services[0].service_endpoint_ids): + # #devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) + # #links = get_links_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) + # topology_id = json_topology_id(INTERDOMAIN_TOPOLOGY_NAME, context_id) + #else: + # # TODO: improve filtering of devices and links + # # TODO: add contexts, topologies, and membership of devices/links in topologies + # #devices = context_client.ListDevices(Empty()) + # #links = context_client.ListLinks(Empty()) + # topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id) + topology_id = json_topology_id(DEFAULT_TOPOLOGY_NAME, context_id) topology_details = context_client.GetTopologyDetails(TopologyId(**topology_id)) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index c1591dbeb..6f723009c 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -22,7 +22,6 @@ from common.tools.grpc.Tools import grpc_message_to_json_string DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.EMULATED_DATACENTER.value : 90, DeviceTypeEnum.DATACENTER.value : 90, - DeviceTypeEnum.NETWORK.value : 90, DeviceTypeEnum.TERAFLOWSDN_CONTROLLER.value : 80, DeviceTypeEnum.EMULATED_PACKET_ROUTER.value : 70, @@ -50,6 +49,7 @@ DEVICE_TYPE_TO_DEEPNESS = { DeviceTypeEnum.OPTICAL_ROADM.value : 10, DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER.value : 0, + DeviceTypeEnum.NETWORK.value : 0, # network out of our control; always delegate } IGNORED_DEVICE_TYPES = {DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER} diff --git a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py index 463b8039b..73a741ae5 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ServiceTypes.py @@ -16,6 +16,10 @@ from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ServiceTypeEnum +NETWORK_DEVICE_TYPES = { + DeviceTypeEnum.NETWORK, +} + PACKET_DEVICE_TYPES = { DeviceTypeEnum.TERAFLOWSDN_CONTROLLER, DeviceTypeEnum.PACKET_ROUTER, DeviceTypeEnum.EMULATED_PACKET_ROUTER, @@ -45,6 +49,7 @@ def get_service_type(device_type : DeviceTypeEnum, prv_service_type : ServiceTyp if device_type in PACKET_DEVICE_TYPES and prv_service_type in SERVICE_TYPE_LXNM: return prv_service_type if device_type in L2_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_L2NM if device_type in OPTICAL_DEVICE_TYPES: return ServiceTypeEnum.SERVICETYPE_TAPI_CONNECTIVITY_SERVICE + if device_type in NETWORK_DEVICE_TYPES: return prv_service_type str_fields = ', '.join([ 'device_type={:s}'.format(str(device_type)), -- GitLab From 33265c97da2f7f3886cb2d4e6625333675936528 
Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:31:59 +0000 Subject: [PATCH 09/23] OECC/PSC'22: - Corrected dump_logs script --- src/tests/oeccpsc22/dump_logs.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/tests/oeccpsc22/dump_logs.sh b/src/tests/oeccpsc22/dump_logs.sh index ae02646aa..a30660f12 100755 --- a/src/tests/oeccpsc22/dump_logs.sh +++ b/src/tests/oeccpsc22/dump_logs.sh @@ -17,25 +17,25 @@ rm -rf tmp/exec echo "Collecting logs for Domain 1..." -mkdir -p tmp/exec/dom1 -kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/exec/dom1/context.log -kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/exec/dom1/device.log -kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/exec/dom1/service.log -kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/exec/dom1/pathcomp-frontend.log -kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/exec/dom1/pathcomp-backend.log -kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/exec/dom1/slice.log -kubectl --namespace tfs-dom1 logs deployment/interdomainservice server > tmp/exec/dom1/interdomain.log +mkdir -p tmp/tfs-dom1/exec +kubectl --namespace tfs-dom1 logs deployments/contextservice server > tmp/tfs-dom1/exec/context.log +kubectl --namespace tfs-dom1 logs deployments/deviceservice server > tmp/tfs-dom1/exec/device.log +kubectl --namespace tfs-dom1 logs deployments/serviceservice server > tmp/tfs-dom1/exec/service.log +kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/tfs-dom1/exec/pathcomp-frontend.log +kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/tfs-dom1/exec/pathcomp-backend.log +kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/tfs-dom1/exec/slice.log +kubectl --namespace tfs-dom1 logs deployment/interdomainservice server > tmp/tfs-dom1/exec/interdomain.log printf "\n" echo "Collecting logs for Domain 2..." 
-mkdir -p tmp/exec/dom2 -kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/exec/dom2/context.log -kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/exec/dom2/device.log -kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/exec/dom2/service.log -kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/exec/dom2/pathcomp-frontend.log -kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/exec/dom2/pathcomp-backend.log -kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/exec/dom2/slice.log -kubectl --namespace tfs-dom2 logs deployment/interdomainservice server > tmp/exec/dom2/interdomain.log +mkdir -p tmp/tfs-dom2/exec +kubectl --namespace tfs-dom2 logs deployments/contextservice server > tmp/tfs-dom2/exec/context.log +kubectl --namespace tfs-dom2 logs deployments/deviceservice server > tmp/tfs-dom2/exec/device.log +kubectl --namespace tfs-dom2 logs deployments/serviceservice server > tmp/tfs-dom2/exec/service.log +kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/tfs-dom2/exec/pathcomp-frontend.log +kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/tfs-dom2/exec/pathcomp-backend.log +kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/tfs-dom2/exec/slice.log +kubectl --namespace tfs-dom2 logs deployment/interdomainservice server > tmp/tfs-dom2/exec/interdomain.log printf "\n" echo "Done!" -- GitLab From a8c7843afe5cf207af5f60df940ec7cbef2789d4 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Fri, 23 Jun 2023 17:32:39 +0000 Subject: [PATCH 10/23] Common - Context Queries: - Improved InterDomain queries - Cleaned up code --- .../tools/context_queries/InterDomain.py | 105 +++++++++--------- 1 file changed, 55 insertions(+), 50 deletions(-) diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py index edb640708..427794c8a 100644 --- a/src/common/tools/context_queries/InterDomain.py +++ b/src/common/tools/context_queries/InterDomain.py @@ -33,28 +33,33 @@ DATACENTER_DEVICE_TYPES = {DeviceTypeEnum.DATACENTER, DeviceTypeEnum.EMULATED_DA def get_local_device_uuids(context_client : ContextClient) -> Set[str]: topologies = context_client.ListTopologies(ADMIN_CONTEXT_ID) - topologies = {topology.topology_id.topology_uuid.uuid : topology for topology in topologies.topologies} - LOGGER.debug('[get_local_device_uuids] topologies.keys()={:s}'.format(str(topologies.keys()))) - local_topology_uuids = set(topologies.keys()) - local_topology_uuids.discard(INTERDOMAIN_TOPOLOGY_NAME) + local_topologies = dict() + for topology in topologies.topologies: + topology_uuid = topology.topology_id.topology_uuid.uuid + if topology_uuid == INTERDOMAIN_TOPOLOGY_NAME: continue + topology_name = topology.name + if topology_name == INTERDOMAIN_TOPOLOGY_NAME: continue + local_topologies[topology_uuid] = topology + LOGGER.debug('[get_local_device_uuids] local_topologies={:s}'.format(str(local_topologies))) + + local_topology_uuids = set(local_topologies.keys()) LOGGER.debug('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) + # Add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are abstracted as a + # local device in inter-domain and the name of the topology is used as abstract device name + # Add physical devices in the local topologies local_device_uuids = set() + for topology_uuid,topology in 
local_topologies.items(): + if topology_uuid == DEFAULT_TOPOLOGY_NAME: continue + topology_name = topology.name + if topology_name == DEFAULT_TOPOLOGY_NAME: continue + #local_device_uuids.add(topology_uuid) - # add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are abstracted as a - # local device in inter-domain and the name of the topology is used as abstract device name - for local_topology_uuid in local_topology_uuids: - if local_topology_uuid == DEFAULT_TOPOLOGY_NAME: continue - local_device_uuids.add(local_topology_uuid) - - # add physical devices in the local topologies - for local_topology_uuid in local_topology_uuids: - topology_device_ids = topologies[local_topology_uuid].device_ids - topology_device_uuids = {device_id.device_uuid.uuid for device_id in topology_device_ids} - LOGGER.debug('[get_local_device_uuids] [loop] local_topology_uuid={:s} topology_device_uuids={:s}'.format( - str(local_topology_uuid), str(topology_device_uuids))) - local_device_uuids.update(topology_device_uuids) + device_uuids = {device_id.device_uuid.uuid for device_id in topology.device_ids} + LOGGER.debug('[get_local_device_uuids] [loop] topology_uuid={:s} device_uuids={:s}'.format( + str(topology_uuid), str(device_uuids))) + local_device_uuids.update(device_uuids) LOGGER.debug('[get_local_device_uuids] local_device_uuids={:s}'.format(str(local_device_uuids))) return local_device_uuids @@ -74,16 +79,16 @@ def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: LOGGER.debug('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) return interdomain_device_uuids -def get_local_domain_devices(context_client : ContextClient) -> List[Device]: - local_device_uuids = get_local_device_uuids(context_client) - all_devices = context_client.ListDevices(Empty()) - local_domain_devices = list() - for device in all_devices.devices: - if not device_type_is_network(device.device_type): continue - device_uuid = device.device_id.device_uuid.uuid - if device_uuid not in local_device_uuids: continue - local_domain_devices.append(device) - return local_domain_devices +#def get_local_domain_devices(context_client : ContextClient) -> List[Device]: +# local_device_uuids = get_local_device_uuids(context_client) +# all_devices = context_client.ListDevices(Empty()) +# local_domain_devices = list() +# for device in all_devices.devices: +# if not device_type_is_network(device.device_type): continue +# device_uuid = device.device_id.device_uuid.uuid +# if device_uuid not in local_device_uuids: continue +# local_domain_devices.append(device) +# return local_domain_devices def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: interdomain_device_uuids = get_interdomain_device_uuids(context_client) @@ -102,22 +107,22 @@ def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPoint LOGGER.debug('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_))) return is_inter_domain_ -def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: - local_device_uuids = get_local_device_uuids(context_client) - LOGGER.debug('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) - remote_endpoint_ids = [ - endpoint_id - for endpoint_id in endpoint_ids - if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids - ] - str_remote_endpoint_ids = [ - (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) - 
for endpoint_id in remote_endpoint_ids - ] - LOGGER.debug('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) - is_multi_domain_ = len(remote_endpoint_ids) > 0 - LOGGER.debug('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) - return is_multi_domain_ +#def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: +# local_device_uuids = get_local_device_uuids(context_client) +# LOGGER.debug('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) +# remote_endpoint_ids = [ +# endpoint_id +# for endpoint_id in endpoint_ids +# if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids +# ] +# str_remote_endpoint_ids = [ +# (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) +# for endpoint_id in remote_endpoint_ids +# ] +# LOGGER.debug('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) +# is_multi_domain_ = len(remote_endpoint_ids) > 0 +# LOGGER.debug('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) +# return is_multi_domain_ def compute_interdomain_path( pathcomp_client : PathCompClient, slice_ : Slice @@ -149,7 +154,7 @@ def compute_interdomain_path( service = next(iter([ service for service in pathcomp_rep.services - if service.service_id == pathcomp_req_svc.service_id + if service.service_id.service_uuid.uuid == pathcomp_req_svc.service_id.service_uuid.uuid ]), None) if service is None: str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) @@ -158,7 +163,7 @@ def compute_interdomain_path( connection = next(iter([ connection for connection in pathcomp_rep.connections - if connection.service_id == pathcomp_req_svc.service_id + if connection.service_id.service_uuid.uuid == pathcomp_req_svc.service_id.service_uuid.uuid ]), None) if connection is None: str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) @@ -222,11 +227,11 @@ def compute_traversed_domains( local_device_uuids = get_local_device_uuids(context_client) LOGGER.debug('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) - interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) - interdomain_devices = { - device.device_id.device_uuid.uuid : device - for device in interdomain_devices - } + #interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) + #interdomain_devices = { + # device.device_id.device_uuid.uuid : device + # for device in interdomain_devices + #} devices_to_domains = get_device_to_domain_map(context_client) LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) -- GitLab From 7ccdba0ad8e4e5838f0dfbc524753aa86fca911a Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 13:58:01 +0000 Subject: [PATCH 11/23] PathComp component: - Added logic to extrapolate underlyign connections for remote domains --- .../algorithms/tools/ComputeSubServices.py | 18 ++++++++++++++++-- .../service/algorithms/tools/ResourceGroups.py | 1 + 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py index 8ffdfaf3e..dedc6f9c6 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ComputeSubServices.py @@ -49,7 
+49,7 @@ import logging, queue, uuid from typing import Dict, List, Optional, Tuple from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import Device, ServiceTypeEnum -from .ResourceGroups import IGNORED_DEVICE_TYPES, get_resource_classification +from .ResourceGroups import IGNORED_DEVICE_TYPES, REMOTEDOMAIN_DEVICE_TYPES, get_resource_classification from .ServiceTypes import get_service_type LOGGER = logging.getLogger(__name__) @@ -81,7 +81,21 @@ def convert_explicit_path_hops_to_connections( LOGGER.debug(' ignored') continue - if prv_res_class[0] is None: + if res_class[1] in REMOTEDOMAIN_DEVICE_TYPES: + LOGGER.debug(' create and terminate underlying connection') + + # create underlying connection + connection_uuid = str(uuid.uuid4()) + prv_service_type = connection_stack.queue[-1][1] + service_type = get_service_type(res_class[1], prv_service_type) + connection_stack.put((connection_uuid, service_type, [path_hop], [])) + + # underlying connection ended + connection = connection_stack.get() + connections.append(connection) + connection_stack.queue[-1][3].append(connection[0]) + #connection_stack.queue[-1][2].append(path_hop) + elif prv_res_class[0] is None: # path ingress LOGGER.debug(' path ingress') connection_stack.put((main_service_uuid, main_service_type, [path_hop], [])) diff --git a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py index 6f723009c..843c41803 100644 --- a/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py +++ b/src/pathcomp/frontend/service/algorithms/tools/ResourceGroups.py @@ -53,6 +53,7 @@ DEVICE_TYPE_TO_DEEPNESS = { } IGNORED_DEVICE_TYPES = {DeviceTypeEnum.EMULATED_OPTICAL_SPLITTER} +REMOTEDOMAIN_DEVICE_TYPES = {DeviceTypeEnum.NETWORK} def get_device_controller_uuid( device : Device -- GitLab From 2e7d56b40f2b573a1b65e587b1da0d50b44cc0f3 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:16:15 +0000 Subject: [PATCH 12/23] Common - Context Queries: - Added field name in method create_context - Enahnced filtering in Interdomain method get_device_to_domain_map() - Migrated old methods compute_interdomain_path() and compute_traversed_domains() to new compute_interdomain_sub_slices() --- src/common/tools/context_queries/Context.py | 4 +- .../tools/context_queries/InterDomain.py | 257 +++++++++--------- .../tools/context_queries/InterDomain_old.py | 143 ++++++++++ 3 files changed, 274 insertions(+), 130 deletions(-) create mode 100644 src/common/tools/context_queries/InterDomain_old.py diff --git a/src/common/tools/context_queries/Context.py b/src/common/tools/context_queries/Context.py index a627b9ba5..5e1facf2e 100644 --- a/src/common/tools/context_queries/Context.py +++ b/src/common/tools/context_queries/Context.py @@ -19,12 +19,12 @@ from common.tools.object_factory.Context import json_context from context.client.ContextClient import ContextClient def create_context( - context_client : ContextClient, context_uuid : str + context_client : ContextClient, context_uuid : str, name : Optional[str] = None ) -> None: existing_context_ids = context_client.ListContextIds(Empty()) existing_context_uuids = {context_id.context_uuid.uuid for context_id in existing_context_ids.context_ids} if context_uuid in existing_context_uuids: return - context_client.SetContext(Context(**json_context(context_uuid))) + context_client.SetContext(Context(**json_context(context_uuid, name=name))) def get_context(context_client : ContextClient, context_uuid : 
str, rw_copy : bool = False) -> Optional[Context]: try: diff --git a/src/common/tools/context_queries/InterDomain.py b/src/common/tools/context_queries/InterDomain.py index 427794c8a..aee7cbf7f 100644 --- a/src/common/tools/context_queries/InterDomain.py +++ b/src/common/tools/context_queries/InterDomain.py @@ -16,12 +16,12 @@ import logging from typing import Dict, List, Set, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum -from common.proto.context_pb2 import ContextId, Device, Empty, EndPointId, ServiceTypeEnum, Slice +from common.proto.context_pb2 import ContextId, Empty, EndPointId, ServiceTypeEnum, Slice from common.proto.pathcomp_pb2 import PathCompRequest -from common.tools.context_queries.CheckType import device_type_is_network -from common.tools.context_queries.Device import get_devices_in_topology -from common.tools.context_queries.Topology import get_topology -from common.tools.grpc.Tools import grpc_message_to_json_string +from .CheckType import device_type_is_network +from .Device import get_device #, get_devices_in_topology +from .Topology import get_topology +from common.tools.grpc.Tools import grpc_message_list_to_json, grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id from context.client.ContextClient import ContextClient from pathcomp.frontend.client.PathCompClient import PathCompClient @@ -41,7 +41,11 @@ def get_local_device_uuids(context_client : ContextClient) -> Set[str]: topology_name = topology.name if topology_name == INTERDOMAIN_TOPOLOGY_NAME: continue local_topologies[topology_uuid] = topology - LOGGER.debug('[get_local_device_uuids] local_topologies={:s}'.format(str(local_topologies))) + str_local_topologies = { + topology_uuid:grpc_message_to_json_string(topology) + for topology_uuid,topology in local_topologies.items() + } + LOGGER.debug('[get_local_device_uuids] local_topologies={:s}'.format(str(str_local_topologies))) local_topology_uuids = set(local_topologies.keys()) LOGGER.debug('[get_local_device_uuids] local_topology_uuids={:s}'.format(str(local_topology_uuids))) @@ -79,17 +83,6 @@ def get_interdomain_device_uuids(context_client : ContextClient) -> Set[str]: LOGGER.debug('[get_interdomain_device_uuids] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) return interdomain_device_uuids -#def get_local_domain_devices(context_client : ContextClient) -> List[Device]: -# local_device_uuids = get_local_device_uuids(context_client) -# all_devices = context_client.ListDevices(Empty()) -# local_domain_devices = list() -# for device in all_devices.devices: -# if not device_type_is_network(device.device_type): continue -# device_uuid = device.device_id.device_uuid.uuid -# if device_uuid not in local_device_uuids: continue -# local_domain_devices.append(device) -# return local_domain_devices - def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: interdomain_device_uuids = get_interdomain_device_uuids(context_client) LOGGER.debug('[is_inter_domain] interdomain_device_uuids={:s}'.format(str(interdomain_device_uuids))) @@ -107,93 +100,22 @@ def is_inter_domain(context_client : ContextClient, endpoint_ids : List[EndPoint LOGGER.debug('[is_inter_domain] is_inter_domain={:s}'.format(str(is_inter_domain_))) return is_inter_domain_ -#def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: -# local_device_uuids = 
get_local_device_uuids(context_client) -# LOGGER.debug('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) -# remote_endpoint_ids = [ -# endpoint_id -# for endpoint_id in endpoint_ids -# if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids -# ] -# str_remote_endpoint_ids = [ -# (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) -# for endpoint_id in remote_endpoint_ids -# ] -# LOGGER.debug('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) -# is_multi_domain_ = len(remote_endpoint_ids) > 0 -# LOGGER.debug('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) -# return is_multi_domain_ - -def compute_interdomain_path( - pathcomp_client : PathCompClient, slice_ : Slice -) -> List[Tuple[str, List[EndPointId]]]: - context_uuid = slice_.slice_id.context_id.context_uuid.uuid - slice_uuid = slice_.slice_id.slice_uuid.uuid - - pathcomp_req = PathCompRequest() - pathcomp_req.shortest_path.Clear() # pylint: disable=no-member - pathcomp_req_svc = pathcomp_req.services.add() # pylint: disable=no-member - pathcomp_req_svc.service_id.context_id.context_uuid.uuid = context_uuid - pathcomp_req_svc.service_id.service_uuid.uuid = slice_uuid - pathcomp_req_svc.service_type = ServiceTypeEnum.SERVICETYPE_L2NM - - for endpoint_id in slice_.slice_endpoint_ids: - service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add() - service_endpoint_id.CopyFrom(endpoint_id) - - constraint_sla_capacity = pathcomp_req_svc.service_constraints.add() - constraint_sla_capacity.sla_capacity.capacity_gbps = 10.0 - - constraint_sla_latency = pathcomp_req_svc.service_constraints.add() - constraint_sla_latency.sla_latency.e2e_latency_ms = 100.0 - - LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) - pathcomp_rep = pathcomp_client.Compute(pathcomp_req) - LOGGER.debug('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) - - service = next(iter([ - service - for service in pathcomp_rep.services - if service.service_id.service_uuid.uuid == pathcomp_req_svc.service_id.service_uuid.uuid - ]), None) - if service is None: - str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) - raise Exception('Service({:s}) not found'.format(str_service_id)) - - connection = next(iter([ - connection - for connection in pathcomp_rep.connections - if connection.service_id.service_uuid.uuid == pathcomp_req_svc.service_id.service_uuid.uuid - ]), None) - if connection is None: - str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) - raise Exception('Connection for Service({:s}) not found'.format(str_service_id)) - - domain_list : List[str] = list() - domain_to_endpoint_ids : Dict[str, List[EndPointId]] = dict() - for endpoint_id in connection.path_hops_endpoint_ids: - device_uuid = endpoint_id.device_id.device_uuid.uuid - #endpoint_uuid = endpoint_id.endpoint_uuid.uuid - if device_uuid not in domain_to_endpoint_ids: domain_list.append(device_uuid) - domain_to_endpoint_ids.setdefault(device_uuid, []).append(endpoint_id) - - return [ - (domain_uuid, domain_to_endpoint_ids.get(domain_uuid)) - for domain_uuid in domain_list - ] - def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: devices_to_domains : Dict[str, str] = dict() contexts = context_client.ListContexts(Empty()) for context in contexts.contexts: context_id = context.context_id context_uuid = context_id.context_uuid.uuid + context_name = context.name topologies = 
context_client.ListTopologies(context_id) - if context_uuid == DEFAULT_CONTEXT_NAME: + if (context_uuid == DEFAULT_CONTEXT_NAME) or (context_name == DEFAULT_CONTEXT_NAME): for topology in topologies.topologies: topology_id = topology.topology_id topology_uuid = topology_id.topology_uuid.uuid + topology_name = topology.name + if topology_uuid in {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME}: continue + if topology_name in {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME}: continue # add topology names except DEFAULT_TOPOLOGY_NAME and INTERDOMAIN_TOPOLOGY_NAME; they are # abstracted as a local device in inter-domain and the name of the topology is used as @@ -209,9 +131,11 @@ def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: for topology in topologies.topologies: topology_id = topology.topology_id topology_uuid = topology_id.topology_uuid.uuid + topology_name = topology.name # if topology is not interdomain if topology_uuid in {INTERDOMAIN_TOPOLOGY_NAME}: continue + if topology_name in {INTERDOMAIN_TOPOLOGY_NAME}: continue # add devices to the remote domain list for device_id in topology.device_ids: @@ -220,40 +144,117 @@ def get_device_to_domain_map(context_client : ContextClient) -> Dict[str, str]: return devices_to_domains -def compute_traversed_domains( - context_client : ContextClient, interdomain_path : List[Tuple[str, List[EndPointId]]] -) -> List[Tuple[str, bool, List[EndPointId]]]: +def compute_interdomain_sub_slices( + context_client : ContextClient, pathcomp_client : PathCompClient, slice_ : Slice +) -> Tuple[Dict[str, List[EndPointId]], Dict[str, List[EndPointId]]]: + context_uuid = slice_.slice_id.context_id.context_uuid.uuid + slice_uuid = slice_.slice_id.slice_uuid.uuid + + pathcomp_req = PathCompRequest() + pathcomp_req.shortest_path.Clear() # pylint: disable=no-member + pathcomp_req_svc = pathcomp_req.services.add() # pylint: disable=no-member + pathcomp_req_svc.service_id.context_id.context_uuid.uuid = context_uuid + pathcomp_req_svc.service_id.service_uuid.uuid = slice_uuid + pathcomp_req_svc.service_type = ServiceTypeEnum.SERVICETYPE_L2NM + + for endpoint_id in slice_.slice_endpoint_ids: + service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add() + service_endpoint_id.CopyFrom(endpoint_id) + + capacity_gbps = 10.0 # default value; to be overwritten by constraints in slice + e2e_latency_ms = 100.0 # default value; to be overwritten by constraints in slice + for constraint in slice_.slice_constraints: + kind = constraint.WhichOneof('constraint') + if kind == 'sla_capacity': + capacity_gbps = constraint.sla_capacity.capacity_gbps + elif kind == 'sla_latency': + e2e_latency_ms = constraint.sla_latency.e2e_latency_ms + + constraint_sla_capacity = pathcomp_req_svc.service_constraints.add() + constraint_sla_capacity.sla_capacity.capacity_gbps = capacity_gbps + + constraint_sla_latency = pathcomp_req_svc.service_constraints.add() + constraint_sla_latency.sla_latency.e2e_latency_ms = e2e_latency_ms + + LOGGER.debug('[compute_interdomain_sub_slices] pathcomp_req = {:s}'.format( + grpc_message_to_json_string(pathcomp_req))) + pathcomp_rep = pathcomp_client.Compute(pathcomp_req) + LOGGER.debug('[compute_interdomain_sub_slices] pathcomp_rep = {:s}'.format( + grpc_message_to_json_string(pathcomp_rep))) + + num_services = len(pathcomp_rep.services) + if num_services == 0: + raise Exception('No services received : {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) + + num_connections = len(pathcomp_rep.connections) + if num_connections != 
num_services: + raise Exception('No connections received : {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) local_device_uuids = get_local_device_uuids(context_client) - LOGGER.debug('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) - - #interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) - #interdomain_devices = { - # device.device_id.device_uuid.uuid : device - # for device in interdomain_devices - #} - - devices_to_domains = get_device_to_domain_map(context_client) - LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) - - traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list() - domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict() - for device_uuid, endpoint_ids in interdomain_path: - domain_uuid = devices_to_domains.get(device_uuid, '---') - domain = domains_dict.get(domain_uuid) - if domain is None: - is_local_domain = domain_uuid in local_device_uuids - domain = (domain_uuid, is_local_domain, []) - traversed_domains.append(domain) - domains_dict[domain_uuid] = domain - domain[2].extend(endpoint_ids) - - str_traversed_domains = [ - (domain_uuid, is_local_domain, [ - (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) - for endpoint_id in endpoint_ids - ]) - for domain_uuid,is_local_domain,endpoint_ids in traversed_domains - ] - LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(str_traversed_domains))) - return traversed_domains + LOGGER.debug('[compute_interdomain_sub_slices] local_device_uuids={:s}'.format(str(local_device_uuids))) + + device_to_domain_map = get_device_to_domain_map(context_client) + LOGGER.debug('[compute_interdomain_sub_slices] device_to_domain_map={:s}'.format(str(device_to_domain_map))) + + local_slices : Dict[str, List[EndPointId]] = dict() + remote_slices : Dict[str, List[EndPointId]] = dict() + req_service_uuid = pathcomp_req_svc.service_id.service_uuid.uuid + for service in pathcomp_rep.services: + service_uuid = service.service_id.service_uuid.uuid + if service_uuid == req_service_uuid: continue # main synthetic service; we don't care + device_uuids = { + endpoint_id.device_id.device_uuid.uuid + for endpoint_id in service.service_endpoint_ids + } + + local_domain_uuids = set() + remote_domain_uuids = set() + for device_uuid in device_uuids: + if device_uuid in local_device_uuids: + domain_uuid = device_to_domain_map.get(device_uuid) + if domain_uuid is None: + raise Exception('Unable to map device({:s}) to a domain'.format(str(device_uuid))) + local_domain_uuids.add(domain_uuid) + else: + device = get_device( + context_client, device_uuid, include_endpoints=True, include_config_rules=False, + include_components=False) + if device is None: raise Exception('Device({:s}) not found'.format(str(device_uuid))) + if not device_type_is_network(device.device_type): + MSG = 'Weird device({:s}) is not local and not network' + raise Exception(MSG.format(grpc_message_to_json_string(device))) + remote_domain_uuids.add(device_uuid) + + if len(local_domain_uuids) > 1: + MSG = 'Devices({:s}) map to multiple local domains({:s})' + raise Exception(MSG.format(str(device_uuids), str(local_domain_uuids))) + is_local = len(local_domain_uuids) == 1 + + if len(remote_domain_uuids) > 1: + MSG = 'Devices({:s}) map to multiple remote domains({:s})' + raise Exception(MSG.format(str(device_uuids), str(remote_domain_uuids))) + is_remote = len(remote_domain_uuids) 
== 1 + + if is_local == is_remote: + MSG = 'Weird service combines local and remote devices: {:s}' + raise Exception(MSG.format(grpc_message_to_json_string(service))) + elif is_local: + local_domain_uuid = local_domain_uuids.pop() + local_slices.setdefault(local_domain_uuid, list()).append(service.service_endpoint_ids) + else: + remote_domain_uuid = remote_domain_uuids.pop() + remote_slices.setdefault(remote_domain_uuid, list()).append(service.service_endpoint_ids) + + str_local_slices = { + domain_uuid:grpc_message_list_to_json(endpoint_ids) + for domain_uuid,endpoint_ids in local_slices.items() + } + LOGGER.debug('[compute_interdomain_sub_slices] local_slices={:s}'.format(str(str_local_slices))) + + str_remote_slices = { + domain_uuid:grpc_message_list_to_json(endpoint_ids) + for domain_uuid,endpoint_ids in remote_slices.items() + } + LOGGER.debug('[compute_interdomain_sub_slices] remote_slices={:s}'.format(str(str_remote_slices))) + + return local_slices, remote_slices diff --git a/src/common/tools/context_queries/InterDomain_old.py b/src/common/tools/context_queries/InterDomain_old.py new file mode 100644 index 000000000..ef4f6aa80 --- /dev/null +++ b/src/common/tools/context_queries/InterDomain_old.py @@ -0,0 +1,143 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
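For illustration, a minimal standalone sketch of the grouping performed by compute_interdomain_sub_slices: the sub-services returned by PathComp are bucketed into local and remote sub-slices keyed by domain. Plain tuples and dicts stand in for the gRPC Service/EndPointId messages, the UUIDs are placeholders, and the error handling for services mixing local and remote devices is omitted.

# Plain-Python sketch of the local/remote bucketing; all values are made up.
local_device_uuids   = {'R1@D1', 'R2@D1', 'R3@D1', 'R4@D1', 'R5@D1'}
device_to_domain_map = {device_uuid: 'D1' for device_uuid in local_device_uuids}

# Each sub-service is represented as a list of (device_uuid, endpoint_uuid) pairs.
sub_services = [
    [('R1@D1', '2'), ('R2@D1', '1')],   # stays inside the local domain D1
    [('D2', 'D1'), ('D2', 'DC2')],      # traverses the abstracted remote domain D2
]

local_slices, remote_slices = {}, {}
for endpoint_ids in sub_services:
    device_uuids = {device_uuid for device_uuid, _ in endpoint_ids}
    if device_uuids <= local_device_uuids:
        # All devices are local: map any of them to its (single) local domain.
        domain_uuid = device_to_domain_map[next(iter(device_uuids))]
        local_slices.setdefault(domain_uuid, []).append(endpoint_ids)
    else:
        # Otherwise the abstracted 'network' device UUID identifies the remote domain.
        domain_uuid = next(iter(device_uuids - local_device_uuids))
        remote_slices.setdefault(domain_uuid, []).append(endpoint_ids)

print(local_slices)    # {'D1': [[('R1@D1', '2'), ('R2@D1', '1')]]}
print(remote_slices)   # {'D2': [[('D2', 'D1'), ('D2', 'DC2')]]}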
+ +from common.tools.context_queries.Device import get_device #, get_devices_in_topology + +## DEPRECATED +#def get_local_domain_devices(context_client : ContextClient) -> List[Device]: +# local_device_uuids = get_local_device_uuids(context_client) +# all_devices = context_client.ListDevices(Empty()) +# local_domain_devices = list() +# for device in all_devices.devices: +# if not device_type_is_network(device.device_type): continue +# device_uuid = device.device_id.device_uuid.uuid +# if device_uuid not in local_device_uuids: continue +# local_domain_devices.append(device) +# return local_domain_devices + +## DEPRECATED +#def is_multi_domain(context_client : ContextClient, endpoint_ids : List[EndPointId]) -> bool: +# local_device_uuids = get_local_device_uuids(context_client) +# LOGGER.debug('[is_multi_domain] local_device_uuids={:s}'.format(str(local_device_uuids))) +# remote_endpoint_ids = [ +# endpoint_id +# for endpoint_id in endpoint_ids +# if endpoint_id.device_id.device_uuid.uuid not in local_device_uuids +# ] +# str_remote_endpoint_ids = [ +# (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) +# for endpoint_id in remote_endpoint_ids +# ] +# LOGGER.debug('[is_multi_domain] remote_endpoint_ids={:s}'.format(str(str_remote_endpoint_ids))) +# is_multi_domain_ = len(remote_endpoint_ids) > 0 +# LOGGER.debug('[is_multi_domain] is_multi_domain={:s}'.format(str(is_multi_domain_))) +# return is_multi_domain_ + +## DEPRECATED +#def compute_interdomain_path( +# pathcomp_client : PathCompClient, slice_ : Slice +#) -> List[Tuple[str, List[EndPointId]]]: +# context_uuid = slice_.slice_id.context_id.context_uuid.uuid +# slice_uuid = slice_.slice_id.slice_uuid.uuid +# +# pathcomp_req = PathCompRequest() +# pathcomp_req.shortest_path.Clear() # pylint: disable=no-member +# pathcomp_req_svc = pathcomp_req.services.add() # pylint: disable=no-member +# pathcomp_req_svc.service_id.context_id.context_uuid.uuid = context_uuid +# pathcomp_req_svc.service_id.service_uuid.uuid = slice_uuid +# pathcomp_req_svc.service_type = ServiceTypeEnum.SERVICETYPE_L2NM +# +# for endpoint_id in slice_.slice_endpoint_ids: +# service_endpoint_id = pathcomp_req_svc.service_endpoint_ids.add() +# service_endpoint_id.CopyFrom(endpoint_id) +# +# constraint_sla_capacity = pathcomp_req_svc.service_constraints.add() +# constraint_sla_capacity.sla_capacity.capacity_gbps = 10.0 +# +# constraint_sla_latency = pathcomp_req_svc.service_constraints.add() +# constraint_sla_latency.sla_latency.e2e_latency_ms = 100.0 +# +# LOGGER.debug('pathcomp_req = {:s}'.format(grpc_message_to_json_string(pathcomp_req))) +# pathcomp_rep = pathcomp_client.Compute(pathcomp_req) +# LOGGER.debug('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep))) +# +# service = next(iter([ +# service +# for service in pathcomp_rep.services +# if service.service_id.service_uuid.uuid == pathcomp_req_svc.service_id.service_uuid.uuid +# ]), None) +# if service is None: +# str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) +# raise Exception('Service({:s}) not found'.format(str_service_id)) +# +# connection = next(iter([ +# connection +# for connection in pathcomp_rep.connections +# if connection.service_id.service_uuid.uuid == pathcomp_req_svc.service_id.service_uuid.uuid +# ]), None) +# if connection is None: +# str_service_id = grpc_message_to_json_string(pathcomp_req_svc.service_id) +# raise Exception('Connection for Service({:s}) not found'.format(str_service_id)) +# +# domain_list : List[str] = list() +# 
domain_to_endpoint_ids : Dict[str, List[EndPointId]] = dict() +# for endpoint_id in connection.path_hops_endpoint_ids: +# device_uuid = endpoint_id.device_id.device_uuid.uuid +# #endpoint_uuid = endpoint_id.endpoint_uuid.uuid +# if device_uuid not in domain_to_endpoint_ids: domain_list.append(device_uuid) +# domain_to_endpoint_ids.setdefault(device_uuid, []).append(endpoint_id) +# +# return [ +# (domain_uuid, domain_to_endpoint_ids.get(domain_uuid)) +# for domain_uuid in domain_list +# ] + +## DEPRECATED +#def compute_traversed_domains( +# context_client : ContextClient, interdomain_path : List[Tuple[str, List[EndPointId]]] +#) -> List[Tuple[str, bool, List[EndPointId]]]: +# +# local_device_uuids = get_local_device_uuids(context_client) +# LOGGER.debug('[compute_traversed_domains] local_device_uuids={:s}'.format(str(local_device_uuids))) +# +# #interdomain_devices = get_devices_in_topology(context_client, ADMIN_CONTEXT_ID, INTERDOMAIN_TOPOLOGY_NAME) +# #interdomain_devices = { +# # device.device_id.device_uuid.uuid : device +# # for device in interdomain_devices +# #} +# +# devices_to_domains = get_device_to_domain_map(context_client) +# LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(devices_to_domains))) +# +# traversed_domains : List[Tuple[str, bool, List[EndPointId]]] = list() +# domains_dict : Dict[str, Tuple[str, bool, List[EndPointId]]] = dict() +# for device_uuid, endpoint_ids in interdomain_path: +# domain_uuid = devices_to_domains.get(device_uuid, '---') +# domain = domains_dict.get(domain_uuid) +# if domain is None: +# is_local_domain = domain_uuid in local_device_uuids +# domain = (domain_uuid, is_local_domain, []) +# traversed_domains.append(domain) +# domains_dict[domain_uuid] = domain +# domain[2].extend(endpoint_ids) +# +# str_traversed_domains = [ +# (domain_uuid, is_local_domain, [ +# (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) +# for endpoint_id in endpoint_ids +# ]) +# for domain_uuid,is_local_domain,endpoint_ids in traversed_domains +# ] +# LOGGER.debug('[compute_traversed_domains] devices_to_domains={:s}'.format(str(str_traversed_domains))) +# return traversed_domains -- GitLab From 7271b2be5e3ac422ac0f9a6455d95c48aa27feb1 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:16:55 +0000 Subject: [PATCH 13/23] Common - Object Factory: - Added field name in Device-related methods - Added field name in Link-related methods --- src/common/tools/object_factory/Device.py | 52 +++++++++++++---------- src/common/tools/object_factory/Link.py | 8 ++-- 2 files changed, 34 insertions(+), 26 deletions(-) diff --git a/src/common/tools/object_factory/Device.py b/src/common/tools/object_factory/Device.py index 66c87b14d..032aa4fb2 100644 --- a/src/common/tools/object_factory/Device.py +++ b/src/common/tools/object_factory/Device.py @@ -13,7 +13,7 @@ # limitations under the License. 
import copy -from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum from common.tools.object_factory.ConfigRule import json_config_rule_set @@ -50,10 +50,10 @@ def json_device_id(device_uuid : str): return {'device_uuid': {'uuid': device_uuid}} def json_device( - device_uuid : str, device_type : str, status : DeviceOperationalStatusEnum, endpoints : List[Dict] = [], - config_rules : List[Dict] = [], drivers : List[Dict] = [] + device_uuid : str, device_type : str, status : DeviceOperationalStatusEnum, name : Optional[str] = None, + endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = [] ): - return { + result = { 'device_id' : json_device_id(device_uuid), 'device_type' : device_type, 'device_config' : {'config_rules': copy.deepcopy(config_rules)}, @@ -61,74 +61,80 @@ def json_device( 'device_drivers' : copy.deepcopy(drivers), 'device_endpoints' : copy.deepcopy(endpoints), } + if name is not None: result['name'] = name + return result def json_device_emulated_packet_router_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_EMU_DRIVERS ): return json_device( - device_uuid, DEVICE_EMUPR_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, + device_uuid, DEVICE_EMUPR_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, drivers=drivers) def json_device_emulated_tapi_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_EMU_DRIVERS ): return json_device( - device_uuid, DEVICE_EMUOLS_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, + device_uuid, DEVICE_EMUOLS_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, drivers=drivers) def json_device_emulated_datacenter_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_EMU_DRIVERS ): return json_device( - device_uuid, DEVICE_EMUDC_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, + device_uuid, DEVICE_EMUDC_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, drivers=drivers) def json_device_packetrouter_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_PR_DRIVERS ): return json_device( - device_uuid, DEVICE_PR_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers) + device_uuid, DEVICE_PR_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, + drivers=drivers) def json_device_tapi_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_TAPI_DRIVERS ): return json_device( - 
device_uuid, DEVICE_TAPI_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers) + device_uuid, DEVICE_TAPI_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, + drivers=drivers) def json_device_xr_constellation_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_XR_CONSTELLATION_DRIVERS ): return json_device( - device_uuid, DEVICE_XR_CONSTELLATION_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, - drivers=drivers) + device_uuid, DEVICE_XR_CONSTELLATION_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, + config_rules=config_rules, drivers=drivers) def json_device_microwave_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_MICROWAVE_DRIVERS ): return json_device( - device_uuid, DEVICE_MICROWAVE_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, + device_uuid, DEVICE_MICROWAVE_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, drivers=drivers) def json_device_p4_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_P4_DRIVERS ): return json_device( - device_uuid, DEVICE_P4_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers) + device_uuid, DEVICE_P4_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, + drivers=drivers) def json_device_tfs_disabled( - device_uuid : str, endpoints : List[Dict] = [], config_rules : List[Dict] = [], + device_uuid : str, name : Optional[str] = None, endpoints : List[Dict] = [], config_rules : List[Dict] = [], drivers : List[Dict] = DEVICE_TFS_DRIVERS ): return json_device( - device_uuid, DEVICE_TFS_TYPE, DEVICE_DISABLED, endpoints=endpoints, config_rules=config_rules, drivers=drivers) + device_uuid, DEVICE_TFS_TYPE, DEVICE_DISABLED, name=name, endpoints=endpoints, config_rules=config_rules, + drivers=drivers) def json_device_connect_rules(address : str, port : int, settings : Dict = {}): return [ diff --git a/src/common/tools/object_factory/Link.py b/src/common/tools/object_factory/Link.py index dbb3d7fb1..5f8080d30 100644 --- a/src/common/tools/object_factory/Link.py +++ b/src/common/tools/object_factory/Link.py @@ -13,7 +13,7 @@ # limitations under the License. 
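A short usage sketch of the extended factory helpers; it assumes the repository's src/ directory is on PYTHONPATH, and the UUIDs and names are illustrative placeholders. Only the handling of the new optional name argument mirrors the change above.

from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled
from common.tools.object_factory.Link import json_link

# Emulated device descriptor with a human-readable name (new optional field).
r1 = json_device_emulated_packet_router_disabled('uuid-of-r1', name='R1@D1')
assert r1['name'] == 'R1@D1'

# Without 'name', the resulting dictionary keeps its previous shape.
r2 = json_device_emulated_packet_router_disabled('uuid-of-r2')
assert 'name' not in r2

# Links accept an optional name in the same way.
link = json_link('R1@D1/2==R2@D1/1', [], name='R1@D1/2==R2@D1/1')
assert link['name'] == 'R1@D1/2==R2@D1/1'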
import copy -from typing import Dict, List, Tuple +from typing import Dict, List, Optional, Tuple def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: return '{:s}/{:s}=={:s}/{:s}'.format( @@ -23,8 +23,10 @@ def get_link_uuid(a_endpoint_id : Dict, z_endpoint_id : Dict) -> str: def json_link_id(link_uuid : str) -> Dict: return {'link_uuid': {'uuid': link_uuid}} -def json_link(link_uuid : str, endpoint_ids : List[Dict]) -> Dict: - return {'link_id': json_link_id(link_uuid), 'link_endpoint_ids': copy.deepcopy(endpoint_ids)} +def json_link(link_uuid : str, endpoint_ids : List[Dict], name : Optional[str] = None) -> Dict: + result = {'link_id': json_link_id(link_uuid), 'link_endpoint_ids': copy.deepcopy(endpoint_ids)} + if name is not None: result['name'] = name + return result def compose_link(endpoint_a, endpoint_z) -> Tuple[Dict, Dict]: link_uuid = get_link_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id']) -- GitLab From 90862915d3741429af093175d95a0fd83323a251 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:22:54 +0000 Subject: [PATCH 14/23] Context component: - Improved EventsCollector to use priority queue and sort batches of events by timestamp --- src/context/client/EventsCollector.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/context/client/EventsCollector.py b/src/context/client/EventsCollector.py index a8783fa8e..c661e0cab 100644 --- a/src/context/client/EventsCollector.py +++ b/src/context/client/EventsCollector.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Callable import grpc, logging, queue, threading, time +from typing import Callable from common.proto.context_pb2 import Empty from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient @@ -23,7 +23,7 @@ LOGGER.setLevel(logging.DEBUG) class _Collector(threading.Thread): def __init__( - self, subscription_func : Callable, events_queue = queue.Queue, + self, subscription_func : Callable, events_queue = queue.PriorityQueue, terminate = threading.Event, log_events_received: bool = False ) -> None: super().__init__(daemon=False) @@ -45,7 +45,8 @@ class _Collector(threading.Thread): if self._log_events_received: str_event = grpc_message_to_json_string(event) LOGGER.info('[_collect] event: {:s}'.format(str_event)) - self._events_queue.put_nowait(event) + timestamp = event.event.timestamp.timestamp + self._events_queue.put_nowait((timestamp, event)) except grpc.RpcError as e: if e.code() == grpc.StatusCode.UNAVAILABLE: LOGGER.info('[_collect] UNAVAILABLE... 
retrying...') @@ -68,7 +69,7 @@ class EventsCollector: activate_slice_collector : bool = True, activate_connection_collector : bool = True, ) -> None: - self._events_queue = queue.Queue() + self._events_queue = queue.PriorityQueue() self._terminate = threading.Event() self._log_events_received = log_events_received @@ -120,7 +121,8 @@ class EventsCollector: def get_event(self, block : bool = True, timeout : float = 0.1): try: - return self._events_queue.get(block=block, timeout=timeout) + _,event = self._events_queue.get(block=block, timeout=timeout) + return event except queue.Empty: # pylint: disable=catching-non-exception return None -- GitLab From 4f8439f6cc1fb35e07cb96d860bc485f2b6923ad Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:25:46 +0000 Subject: [PATCH 15/23] OECC/PSC'22 - Tests: - Updated domain descriptors - Updated interdomain slice descriptor - Updates deploy specs per domain - Updated dump_logs script - Disabled unneeded service manifests --- src/tests/oeccpsc22/deploy_specs_dom1.sh | 6 +- src/tests/oeccpsc22/deploy_specs_dom2.sh | 6 +- src/tests/oeccpsc22/descriptors/domain1.json | 124 +++++++++++------- src/tests/oeccpsc22/descriptors/domain2.json | 58 ++++---- .../descriptors/inter-domain-slice.json | 14 +- src/tests/oeccpsc22/dump_logs.sh | 2 + .../{ => old}/expose-services-dom1.yaml | 22 ++-- .../{ => old}/expose-services-dom2.yaml | 22 ++-- 8 files changed, 140 insertions(+), 114 deletions(-) rename src/tests/oeccpsc22/{ => old}/expose-services-dom1.yaml (90%) rename src/tests/oeccpsc22/{ => old}/expose-services-dom2.yaml (90%) diff --git a/src/tests/oeccpsc22/deploy_specs_dom1.sh b/src/tests/oeccpsc22/deploy_specs_dom1.sh index b269236b0..7db9159e0 100755 --- a/src/tests/oeccpsc22/deploy_specs_dom1.sh +++ b/src/tests/oeccpsc22/deploy_specs_dom1.sh @@ -30,7 +30,7 @@ export TFS_IMAGE_TAG="dev" export TFS_K8S_NAMESPACE="tfs-dom1" # Set additional manifest files to be applied after the deployment -export TFS_EXTRA_MANIFESTS="oeccpsc22/nginx-ingress-http-dom1.yaml oeccpsc22/expose-services-dom1.yaml" +export TFS_EXTRA_MANIFESTS="oeccpsc22/nginx-ingress-http-dom1.yaml" # Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" @@ -64,7 +64,7 @@ export CRDB_DATABASE="tfs_dom1" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="" +export CRDB_DROP_DATABASE_IF_EXISTS="YES" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" @@ -112,7 +112,7 @@ export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" # Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="" +export QDB_DROP_TABLES_IF_EXIST="YES" # Disable flag for re-deploying QuestDB from scratch. 
export QDB_REDEPLOY="" diff --git a/src/tests/oeccpsc22/deploy_specs_dom2.sh b/src/tests/oeccpsc22/deploy_specs_dom2.sh index 112142437..3e23c56fe 100755 --- a/src/tests/oeccpsc22/deploy_specs_dom2.sh +++ b/src/tests/oeccpsc22/deploy_specs_dom2.sh @@ -30,7 +30,7 @@ export TFS_IMAGE_TAG="dev" export TFS_K8S_NAMESPACE="tfs-dom2" # Set additional manifest files to be applied after the deployment -export TFS_EXTRA_MANIFESTS="oeccpsc22/nginx-ingress-http-dom2.yaml oeccpsc22/expose-services-dom2.yaml" +export TFS_EXTRA_MANIFESTS="oeccpsc22/nginx-ingress-http-dom2.yaml" # Set the new Grafana admin password export TFS_GRAFANA_PASSWORD="admin123+" @@ -64,7 +64,7 @@ export CRDB_DATABASE="tfs_dom2" export CRDB_DEPLOY_MODE="single" # Disable flag for dropping database, if it exists. -export CRDB_DROP_DATABASE_IF_EXISTS="" +export CRDB_DROP_DATABASE_IF_EXISTS="YES" # Disable flag for re-deploying CockroachDB from scratch. export CRDB_REDEPLOY="" @@ -112,7 +112,7 @@ export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" # Disable flag for dropping tables if they exist. -export QDB_DROP_TABLES_IF_EXIST="" +export QDB_DROP_TABLES_IF_EXIST="YES" # Disable flag for re-deploying QuestDB from scratch. export QDB_REDEPLOY="" diff --git a/src/tests/oeccpsc22/descriptors/domain1.json b/src/tests/oeccpsc22/descriptors/domain1.json index 2db10b4d1..3e12b2b31 100644 --- a/src/tests/oeccpsc22/descriptors/domain1.json +++ b/src/tests/oeccpsc22/descriptors/domain1.json @@ -4,6 +4,7 @@ ], "topologies": [ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} ], "devices": [ @@ -15,7 +16,8 @@ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ {"uuid": "int", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, {"uuid": "D1", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" } - ]}}} + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.0.1"}}} ]} }, { @@ -26,13 +28,14 @@ {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ {"uuid": "int", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, {"uuid": "D2", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" } - ]}}} + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.0.2"}}} ]} }, { "device_id": {"device_uuid": {"uuid": "D2"}}, "device_type": "network", "device_drivers": [0], "device_endpoints": [], "device_operational_status": 2, "device_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "10.0.2.10"}}, + {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "interdomainservice.tfs-dom2"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "10010"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ {"uuid": "D1", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" }, @@ -46,10 +49,14 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": 
{"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "2", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "5", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "DC1", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" } - ]}}} + {"uuid": "2", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "5", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "DC1", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/border" } + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.1.1"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.1.2"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.1.5"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[DC1]/settings", "resource_value": {"remote_router": "10.0.0.1"}}} ]} }, { @@ -58,10 +65,14 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "1", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "3", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "5", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"} - ]}}} + {"uuid": "1", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "3", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "5", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"} + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.1.2"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.1.1"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.1.3"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.1.5"}}} ]} }, { @@ -70,9 +81,12 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "2", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "4", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"} - ]}}} + {"uuid": "2", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "4", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"} + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.1.3"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.1.2"}}}, + {"action": 1, "custom": {"resource_key": 
"/endpoints/endpoint[4]/settings", "resource_value": {"remote_router": "10.0.1.4"}}} ]} }, { @@ -81,10 +95,15 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "3", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "5", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "D2", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" } - ]}}} + {"uuid": "3", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "5", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "D2", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/border" } + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.1.4"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.1.3"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.1.5"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[D2]/settings", "resource_value": {"remote_router": "10.0.2.2"}}} + ]} }, { @@ -93,22 +112,27 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "1", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "2", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "4", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"} - ]}}} + {"uuid": "1", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "2", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"}, + {"uuid": "4", "context_uuid": "admin", "topology_uuid": "D1", "type": "copper/internal"} + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.1.5"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.1.1"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.1.2"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[4]/settings", "resource_value": {"remote_router": "10.0.1.4"}}} ]} } ], "links": [ {"link_id": {"link_uuid": {"uuid": "DC1/D1==R1@D1/DC1"}}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}, - {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R1@D1/DC1==DC1/D1"}}, 
"link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "DC1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, {"device_id": {"device_uuid": {"uuid": "DC1"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} ]}, + {"link_id": {"link_uuid": {"uuid": "DC2/D2==D2/DC2"}}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}, {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "DC2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} @@ -118,61 +142,61 @@ {"device_id": {"device_uuid": {"uuid": "DC2"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R4@D1/D2==D2/D1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}, + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} ]}, {"link_id": {"link_uuid": {"uuid": "D2/D1==R4@D1/D2"}}, "link_endpoint_ids": [ {"device_id": {"device_uuid": {"uuid": "D2"}}, "endpoint_uuid": {"uuid": "D1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}}, - {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "D2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R1@D1/2==R2@D1/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}}, - {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}} + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R1@D1/5==R5@D1/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}}, - {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}} + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}, "topology_id": {"context_id": {"context_uuid": {"uuid": 
"admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R2@D1/1==R1@D1/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}}, - {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}} + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R2@D1/3==R3@D1/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}}, - {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}} + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R2@D1/5==R5@D1/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}}, - {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}} + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R3@D1/2==R2@D1/3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}}, - {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}} + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R3@D1/4==R4@D1/3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}}, - {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}} + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R4@D1/3==R3@D1/4"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}}, - {"device_id": {"device_uuid": 
{"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}} + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R3@D1"}}, "endpoint_uuid": {"uuid": "4"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R4@D1/5==R5@D1/4"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}}, - {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}} + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R5@D1/1==R1@D1/5"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}}, - {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}} + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R1@D1"}}, "endpoint_uuid": {"uuid": "5"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R5@D1/2==R2@D1/5"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}}, - {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}} + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R2@D1"}}, "endpoint_uuid": {"uuid": "5"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R5@D1/4==R4@D1/5"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}}, - {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}} + {"device_id": {"device_uuid": {"uuid": "R5@D1"}}, "endpoint_uuid": {"uuid": "4"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}}, + {"device_id": {"device_uuid": {"uuid": "R4@D1"}}, "endpoint_uuid": {"uuid": "5"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D1"}}} ]} ] } diff --git a/src/tests/oeccpsc22/descriptors/domain2.json b/src/tests/oeccpsc22/descriptors/domain2.json index e7a00f74e..6e282ca7a 100644 --- a/src/tests/oeccpsc22/descriptors/domain2.json +++ b/src/tests/oeccpsc22/descriptors/domain2.json @@ -4,6 +4,7 @@ ], "topologies": [ {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2" }}}, {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "inter"}}} ], "devices": [ @@ -13,9 +14,12 @@ 
{"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "2", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "3", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"} - ]}}} + {"uuid": "2", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/internal"}, + {"uuid": "3", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/internal"} + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.2.1"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.2.2"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.2.3"}}} ]} }, { @@ -24,10 +28,14 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "1", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "3", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "D1", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" } - ]}}} + {"uuid": "1", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/internal"}, + {"uuid": "3", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/internal"}, + {"uuid": "D1", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/border" } + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.2.2"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.2.1"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.2.3"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[D1]/settings", "resource_value": {"remote_router": "10.0.1.4"}}} ]} }, { @@ -36,37 +44,41 @@ {"action": 1, "custom": {"resource_key": "_connect/address", "resource_value": "127.0.0.1"}}, {"action": 1, "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, {"action": 1, "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ - {"uuid": "1", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "2", "context_uuid": "admin", "topology_uuid": "admin", "type": "copper/internal"}, - {"uuid": "DC2", "context_uuid": "admin", "topology_uuid": "inter", "type": "copper/border" } - ]}}} + {"uuid": "1", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/internal"}, + {"uuid": "2", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/internal"}, + {"uuid": "DC2", "context_uuid": "admin", "topology_uuid": "D2", "type": "copper/border" } + ]}}}, + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"router_id": "10.0.2.3"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.2.1"}}}, + {"action": 1, "custom": {"resource_key": 
"/endpoints/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.2.2"}}}, + {"action": 1, "custom": {"resource_key": "/endpoints/endpoint[DC2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}} ]} } ], "links": [ {"link_id": {"link_uuid": {"uuid": "R1@D2/2==R2@D2/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}}, - {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}} + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}}, + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R1@D2/3==R3@D2/1"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "3"}}, - {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "1"}} + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}}, + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R2@D2/1==R1@D2/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}}, - {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}} + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}}, + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R2@D2/3==R3@D2/2"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}}, - {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}} + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}}, + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R3@D2/1==R1@D2/3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "1"}}, - {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "3"}} + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "1"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}}, + {"device_id": {"device_uuid": {"uuid": "R1@D2"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}} ]}, {"link_id": {"link_uuid": {"uuid": "R3@D2/2==R2@D2/3"}}, "link_endpoint_ids": [ - {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}}, - {"device_id": {"device_uuid": 
{"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}} + {"device_id": {"device_uuid": {"uuid": "R3@D2"}}, "endpoint_uuid": {"uuid": "2"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}}, + {"device_id": {"device_uuid": {"uuid": "R2@D2"}}, "endpoint_uuid": {"uuid": "3"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "D2"}}} ]} ] } diff --git a/src/tests/oeccpsc22/descriptors/inter-domain-slice.json b/src/tests/oeccpsc22/descriptors/inter-domain-slice.json index b1d71858f..3651f569c 100644 --- a/src/tests/oeccpsc22/descriptors/inter-domain-slice.json +++ b/src/tests/oeccpsc22/descriptors/inter-domain-slice.json @@ -12,19 +12,7 @@ {"sla_latency": {"e2e_latency_ms": 15.2}} ], "slice_config": {"config_rules": [ - {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"mtu": 1512, "vlan_id": 300}}}, - {"action": 1, "custom": {"resource_key": "/device[R1@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}}, - {"action": 1, "custom": {"resource_key": "/device[R1@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}}, - {"action": 1, "custom": {"resource_key": "/device[R2@D1]/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.0.1"}}}, - {"action": 1, "custom": {"resource_key": "/device[R2@D1]/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.0.3"}}}, - {"action": 1, "custom": {"resource_key": "/device[R2@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}}, - {"action": 1, "custom": {"resource_key": "/device[R3@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}}, - {"action": 1, "custom": {"resource_key": "/device[R3@D1]/endpoint[4]/settings", "resource_value": {"remote_router": "10.0.0.4"}}}, - {"action": 1, "custom": {"resource_key": "/device[R4@D1]/endpoint[3]/settings", "resource_value": {"remote_router": "10.0.0.3"}}}, - {"action": 1, "custom": {"resource_key": "/device[R4@D1]/endpoint[5]/settings", "resource_value": {"remote_router": "10.0.0.5"}}}, - {"action": 1, "custom": {"resource_key": "/device[R5@D1]/endpoint[1]/settings", "resource_value": {"remote_router": "10.0.0.1"}}}, - {"action": 1, "custom": {"resource_key": "/device[R5@D1]/endpoint[2]/settings", "resource_value": {"remote_router": "10.0.0.2"}}}, - {"action": 1, "custom": {"resource_key": "/device[R5@D1]/endpoint[4]/settings", "resource_value": {"remote_router": "10.0.0.4"}}} + {"action": 1, "custom": {"resource_key": "/settings", "resource_value": {"mtu": 1512, "vlan_id": 300}}} ]} } ] diff --git a/src/tests/oeccpsc22/dump_logs.sh b/src/tests/oeccpsc22/dump_logs.sh index a30660f12..db96640b2 100755 --- a/src/tests/oeccpsc22/dump_logs.sh +++ b/src/tests/oeccpsc22/dump_logs.sh @@ -25,6 +25,7 @@ kubectl --namespace tfs-dom1 logs deployments/pathcompservice frontend > tmp/tfs kubectl --namespace tfs-dom1 logs deployments/pathcompservice backend > tmp/tfs-dom1/exec/pathcomp-backend.log kubectl --namespace tfs-dom1 logs deployments/sliceservice server > tmp/tfs-dom1/exec/slice.log kubectl --namespace tfs-dom1 logs deployment/interdomainservice server > tmp/tfs-dom1/exec/interdomain.log +kubectl --namespace tfs-dom1 logs deployment/webuiservice server > tmp/tfs-dom1/exec/webui.log printf "\n" echo "Collecting logs for Domain 2..." 
@@ -36,6 +37,7 @@ kubectl --namespace tfs-dom2 logs deployments/pathcompservice frontend > tmp/tfs kubectl --namespace tfs-dom2 logs deployments/pathcompservice backend > tmp/tfs-dom2/exec/pathcomp-backend.log kubectl --namespace tfs-dom2 logs deployments/sliceservice server > tmp/tfs-dom2/exec/slice.log kubectl --namespace tfs-dom2 logs deployment/interdomainservice server > tmp/tfs-dom2/exec/interdomain.log +kubectl --namespace tfs-dom2 logs deployment/webuiservice server > tmp/tfs-dom2/exec/webui.log printf "\n" echo "Done!" diff --git a/src/tests/oeccpsc22/expose-services-dom1.yaml b/src/tests/oeccpsc22/old/expose-services-dom1.yaml similarity index 90% rename from src/tests/oeccpsc22/expose-services-dom1.yaml rename to src/tests/oeccpsc22/old/expose-services-dom1.yaml index ebfb38fc4..f7eab1372 100644 --- a/src/tests/oeccpsc22/expose-services-dom1.yaml +++ b/src/tests/oeccpsc22/old/expose-services-dom1.yaml @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: v1 -kind: Service -metadata: - name: remote-teraflow -spec: - type: ExternalName - externalName: interdomainservice.dom2.svc.cluster.local - ports: - - name: grpc - protocol: TCP - port: 10010 +#apiVersion: v1 +#kind: Service +#metadata: +# name: remote-teraflow +#spec: +# type: ExternalName +# externalName: interdomainservice.dom2.svc.cluster.local +# ports: +# - name: grpc +# protocol: TCP +# port: 10010 #--- #apiVersion: v1 #kind: Service diff --git a/src/tests/oeccpsc22/expose-services-dom2.yaml b/src/tests/oeccpsc22/old/expose-services-dom2.yaml similarity index 90% rename from src/tests/oeccpsc22/expose-services-dom2.yaml rename to src/tests/oeccpsc22/old/expose-services-dom2.yaml index cf04f3f5e..3ef4fd879 100644 --- a/src/tests/oeccpsc22/expose-services-dom2.yaml +++ b/src/tests/oeccpsc22/old/expose-services-dom2.yaml @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: v1 -kind: Service -metadata: - name: remote-teraflow -spec: - type: ExternalName - externalName: interdomainservice.dom1.svc.cluster.local - ports: - - name: grpc - protocol: TCP - port: 10010 +#apiVersion: v1 +#kind: Service +#metadata: +# name: remote-teraflow +#spec: +# type: ExternalName +# externalName: interdomainservice.dom1.svc.cluster.local +# ports: +# - name: grpc +# protocol: TCP +# port: 10010 #--- #apiVersion: v1 #kind: Service -- GitLab From a6e8d8501583f40cdad0336174a580cfc3589843 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:26:57 +0000 Subject: [PATCH 16/23] Slice component: - Fixed a slice name overwrite bug - Corrected slice deletion to support interdomain scenarios --- src/slice/service/SliceServiceServicerImpl.py | 36 +++++++++---------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/src/slice/service/SliceServiceServicerImpl.py b/src/slice/service/SliceServiceServicerImpl.py index e87ee0f97..cbe2dd5c7 100644 --- a/src/slice/service/SliceServiceServicerImpl.py +++ b/src/slice/service/SliceServiceServicerImpl.py @@ -48,7 +48,7 @@ class SliceServiceServicerImpl(SliceServiceServicer): slice_rw = Slice() slice_rw.CopyFrom(request if slice_ro is None else slice_ro) - slice_rw.name = request.name + if len(request.name) > 0: slice_rw.name = request.name slice_rw.slice_owner.CopyFrom(request.slice_owner) # pylint: disable=no-member slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member @@ -203,30 +203,28 @@ class SliceServiceServicerImpl(SliceServiceServicer): context_client.close() return Empty() + _slice_rw = Slice() + _slice_rw.CopyFrom(_slice) + _slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member + context_client.SetSlice(_slice_rw) + if is_inter_domain(context_client, _slice.slice_endpoint_ids): interdomain_client = InterdomainClient() slice_id = interdomain_client.DeleteSlice(request) - #raise NotImplementedError('Delete inter-domain slice') interdomain_client.close() else: - current_slice = Slice() - current_slice.CopyFrom(_slice) - current_slice.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member - context_client.SetSlice(current_slice) - if self._slice_grouper.is_enabled: - ungrouped = self._slice_grouper.ungroup(current_slice) # pylint: disable=unused-variable - - service_client = ServiceClient() - for service_id in _slice.slice_service_ids: - current_slice = Slice() - current_slice.slice_id.CopyFrom(_slice.slice_id) # pylint: disable=no-member - slice_service_id = current_slice.slice_service_ids.add() # pylint: disable=no-member - slice_service_id.CopyFrom(service_id) - context_client.UnsetSlice(current_slice) - - service_client.DeleteService(service_id) - service_client.close() + ungrouped = self._slice_grouper.ungroup(_slice_rw) # pylint: disable=unused-variable + + service_client = ServiceClient() + for service_id in _slice.slice_service_ids: + tmp_slice = Slice() + tmp_slice.slice_id.CopyFrom(_slice.slice_id) # pylint: disable=no-member + slice_service_id = tmp_slice.slice_service_ids.add() # pylint: disable=no-member + slice_service_id.CopyFrom(service_id) + context_client.UnsetSlice(tmp_slice) + service_client.DeleteService(service_id) + service_client.close() context_client.RemoveSlice(request) context_client.close() -- GitLab From b9a00b181b434cc9cae3d160238e7ce047cf4dff Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:28:52 +0000 Subject: [PATCH 17/23] 
Service component: - Enhanced generic SettingsHandler with a method to dump config rules stored. - Extended L2NMEmulated Service Handler to support per-endpoint settings provided as device initialization parameters --- .../service_handler_api/SettingsHandler.py | 5 +- .../L2NMEmulatedServiceHandler.py | 46 ++++++++++++++++++- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/src/service/service/service_handler_api/SettingsHandler.py b/src/service/service/service_handler_api/SettingsHandler.py index 85dd3a128..a58db00ce 100644 --- a/src/service/service/service_handler_api/SettingsHandler.py +++ b/src/service/service/service_handler_api/SettingsHandler.py @@ -16,7 +16,7 @@ import anytree, json, logging from typing import Any, List, Optional, Tuple, Union from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, Device, EndPoint, ServiceConfig from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string -from service.service.service_handler_api.AnyTreeTools import TreeNode, delete_subnode, get_subnode, set_subnode_value +from .AnyTreeTools import TreeNode, delete_subnode, dump_subtree, get_subnode, set_subnode_value LOGGER = logging.getLogger(__name__) @@ -86,3 +86,6 @@ class SettingsHandler: MSG = 'Unsupported Action({:s}) in ConfigRule({:s})' LOGGER.warning(MSG.format(str(action), grpc_message_to_json_string(config_rule))) return + + def dump_config_rules(self) -> List[Tuple[Any, Any]]: + return dump_subtree(self.__config) diff --git a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py index 416c10f72..8bd164422 100644 --- a/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py +++ b/src/service/service/service_handlers/l2nm_emulated/L2NMEmulatedServiceHandler.py @@ -15,7 +15,7 @@ import json, logging from typing import Any, List, Optional, Tuple, Union from common.method_wrappers.Decorator import MetricsPool, metered_subclass_method -from common.proto.context_pb2 import ConfigRule, DeviceId, Service +from common.proto.context_pb2 import ConfigActionEnum, ConfigRule, DeviceId, Service from common.tools.object_factory.Device import json_device_id from common.type_checkers.Checkers import chk_type from service.service.service_handler_api.Tools import get_device_endpoint_uuids, get_endpoint_matching @@ -52,9 +52,30 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device_obj.name + + for config_rule in device_obj.device_config.config_rules: + raw_data = SettingsHandler._config_rule_to_raw(config_rule) + if raw_data is None: continue + action, key_or_path, value = raw_data + if action != ConfigActionEnum.CONFIGACTION_SET: continue + if not key_or_path.startswith('/endpoints/endpoint['): continue + if not key_or_path.endswith(']/settings'): continue + key_or_path = key_or_path.replace('/endpoints/', '/device[{:s}]/'.format(device_name)) + LOGGER.debug('Setting key_or_path={:s} value={:s}'.format(str(key_or_path), str(value))) + self.__settings_handler.set(key_or_path, value) + + service_config_rules = self.__settings_handler.dump_config_rules() + LOGGER.debug('service_config_rules={:s}'.format(str(service_config_rules))) + endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) - endpoint_settings = 
self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) endpoint_name = endpoint_obj.name + endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) + MSG = 'device_uuid={:s} device_name={:s} endpoint_uuid={:s} endpoint_name={:s} endpoint_settings={:s}' + str_endpoint_settings = str(None) if endpoint_settings is None else str(endpoint_settings.value) + LOGGER.debug(MSG.format( + str(device_uuid), str(device_name), str(endpoint_uuid), str(endpoint_name), str_endpoint_settings + )) json_config_rules = setup_config_rules( service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, @@ -89,9 +110,30 @@ class L2NMEmulatedServiceHandler(_ServiceHandler): device_uuid, endpoint_uuid = get_device_endpoint_uuids(endpoint) device_obj = self.__task_executor.get_device(DeviceId(**json_device_id(device_uuid))) + device_name = device_obj.name + + for config_rule in device_obj.device_config.config_rules: + raw_data = SettingsHandler._config_rule_to_raw(config_rule) + if raw_data is None: continue + action, key_or_path, value = raw_data + if action != ConfigActionEnum.CONFIGACTION_SET: continue + if not key_or_path.startswith('/endpoints/endpoint['): continue + if not key_or_path.endswith(']/settings'): continue + key_or_path = key_or_path.replace('/endpoints/', '/device[{:s}]/'.format(device_name)) + LOGGER.debug('Setting key_or_path={:s} value={:s}'.format(str(key_or_path), str(value))) + self.__settings_handler.set(key_or_path, value) + + service_config_rules = self.__settings_handler.dump_config_rules() + LOGGER.debug('service_config_rules={:s}'.format(str(service_config_rules))) + endpoint_obj = get_endpoint_matching(device_obj, endpoint_uuid) endpoint_settings = self.__settings_handler.get_endpoint_settings(device_obj, endpoint_obj) endpoint_name = endpoint_obj.name + MSG = 'device_uuid={:s} device_name={:s} endpoint_uuid={:s} endpoint_name={:s} endpoint_settings={:s}' + str_endpoint_settings = str(None) if endpoint_settings is None else str(endpoint_settings.value) + LOGGER.debug(MSG.format( + str(device_uuid), str(device_name), str(endpoint_uuid), str(endpoint_name), str_endpoint_settings + )) json_config_rules = teardown_config_rules( service_uuid, connection_uuid, device_uuid, endpoint_uuid, endpoint_name, -- GitLab From 08a77949a53551ddef5983e2e56b0437d0108055 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:30:35 +0000 Subject: [PATCH 18/23] Interdomain component: - Added setting to (de)activate the topology abstractor module. --- src/interdomain/Config.py | 11 +++++++++++ src/interdomain/service/__main__.py | 10 +++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/src/interdomain/Config.py b/src/interdomain/Config.py index 38d04994f..f8e81dd2b 100644 --- a/src/interdomain/Config.py +++ b/src/interdomain/Config.py @@ -11,3 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ +from common.Settings import get_setting + +SETTING_NAME_TOPOLOGY_ABSTRACTOR = 'TOPOLOGY_ABSTRACTOR' +TRUE_VALUES = {'Y', 'YES', 'TRUE', 'T', 'E', 'ENABLE', 'ENABLED'} + +def is_topology_abstractor_enabled() -> bool: + is_enabled = get_setting(SETTING_NAME_TOPOLOGY_ABSTRACTOR, default=None) + if is_enabled is None: return False + str_is_enabled = str(is_enabled).upper() + return str_is_enabled in TRUE_VALUES diff --git a/src/interdomain/service/__main__.py b/src/interdomain/service/__main__.py index f867dc378..b986f8921 100644 --- a/src/interdomain/service/__main__.py +++ b/src/interdomain/service/__main__.py @@ -18,6 +18,7 @@ from common.Constants import ServiceNameEnum from common.Settings import ( ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, get_env_var_name, get_log_level, get_metrics_port, wait_for_environment_variables) +from interdomain.Config import is_topology_abstractor_enabled from .topology_abstractor.TopologyAbstractor import TopologyAbstractor from .InterdomainService import InterdomainService from .RemoteDomainClients import RemoteDomainClients @@ -63,14 +64,17 @@ def main(): grpc_service.start() # Subscribe to Context Events - topology_abstractor = TopologyAbstractor() - topology_abstractor.start() + topology_abstractor_enabled = is_topology_abstractor_enabled() + if topology_abstractor_enabled: + topology_abstractor = TopologyAbstractor() + topology_abstractor.start() # Wait for Ctrl+C or termination signal while not terminate.wait(timeout=1.0): pass LOGGER.info('Terminating...') - topology_abstractor.stop() + if topology_abstractor_enabled: + topology_abstractor.stop() grpc_service.stop() remote_domain_clients.stop() -- GitLab From ca80bc36f432c2c65a60b27b2449dc1e8052f8b2 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:31:33 +0000 Subject: [PATCH 19/23] Common - Settings: - Upgraded methods wait_for_environment_variables() and find_missing_environment_variables() as they were failing in non-K8s environments. - Migrated method find_missing_environment_variables() to find_environment_variables() --- src/common/Settings.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/src/common/Settings.py b/src/common/Settings.py index 1efe80db7..6b35e42b7 100644 --- a/src/common/Settings.py +++ b/src/common/Settings.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging, os, time -from typing import List +from typing import Dict, List from common.Constants import ( DEFAULT_GRPC_BIND_ADDRESS, DEFAULT_GRPC_GRACE_PERIOD, DEFAULT_GRPC_MAX_WORKERS, DEFAULT_HTTP_BIND_ADDRESS, DEFAULT_LOG_LEVEL, DEFAULT_METRICS_PORT, DEFAULT_SERVICE_GRPC_PORTS, DEFAULT_SERVICE_HTTP_BASEURLS, @@ -37,23 +37,24 @@ ENVVAR_SUFIX_SERVICE_HOST = 'SERVICE_HOST' ENVVAR_SUFIX_SERVICE_PORT_GRPC = 'SERVICE_PORT_GRPC' ENVVAR_SUFIX_SERVICE_PORT_HTTP = 'SERVICE_PORT_HTTP' -def find_missing_environment_variables( - required_environment_variables : List[str] = [] -) -> List[str]: - if ENVVAR_KUBERNETES_PORT in os.environ: - missing_variables = set(required_environment_variables).difference(set(os.environ.keys())) - else: - # We're not running in Kubernetes, nothing to wait for - missing_variables = required_environment_variables - return missing_variables +def find_environment_variables( + environment_variable_names : List[str] = [] +) -> Dict[str, str]: + environment_variable : Dict[str, str] = dict() + for name in environment_variable_names: + if name not in os.environ: continue + environment_variable[name] = os.environ[name] + return environment_variable def wait_for_environment_variables( required_environment_variables : List[str] = [], wait_delay_seconds : float = DEFAULT_RESTART_DELAY ): - missing_variables = find_missing_environment_variables(required_environment_variables) - if len(missing_variables) == 0: return # We have all environment variables defined - msg = 'Variables({:s}) are missing in Environment({:s}), restarting in {:f} seconds...' - LOGGER.error(msg.format(str(missing_variables), str(os.environ), wait_delay_seconds)) + if ENVVAR_KUBERNETES_PORT not in os.environ: return # Not running in Kubernetes + found = find_environment_variables(required_environment_variables) + missing = set(required_environment_variables).difference(set(found.keys())) + if len(missing) == 0: return # We have all environment variables defined + MSG = 'Variables({:s}) are missing in Environment({:s}), restarting in {:f} seconds...' + LOGGER.error(MSG.format(str(missing), str(os.environ), wait_delay_seconds)) time.sleep(wait_delay_seconds) raise Exception('Restarting...') # pylint: disable=broad-exception-raised -- GitLab From 38fd7f22d61329c3523b1708db1dc50d55e025da Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:31:59 +0000 Subject: [PATCH 20/23] Manifests: - Added env var TOPOLOGY_ABSTRACTOR to Interdomain manifest --- manifests/interdomainservice.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/manifests/interdomainservice.yaml b/manifests/interdomainservice.yaml index 378daef88..d28887e10 100644 --- a/manifests/interdomainservice.yaml +++ b/manifests/interdomainservice.yaml @@ -36,6 +36,8 @@ spec: env: - name: LOG_LEVEL value: "DEBUG" + - name: TOPOLOGY_ABSTRACTOR + value: "DISABLE" readinessProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:10010"] -- GitLab From d5d975d9d02957f16d496559962b69b0e9969911 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 29 Jun 2023 14:42:31 +0000 Subject: [PATCH 21/23] Interdomain component: - Migrated logic to Release 2.0 and used new common Interdomain methods. - Improvements detection of remote domain settings and instantiation of interdomain clients. - Updated Topology Abstractor to consider abstract device and link names and uuids. 
- Improved logging messages of Topology Abstractor - Corrected local topology retrieval in Topology Abstractor - Implemented DeleteSlice RPC method - Updated helper methods to improve detection of slice owner and store it as parameter of the slice. - Added new unitary tests - Added new dependency on service component's client --- src/interdomain/Dockerfile | 2 +- .../service/InterdomainServiceServicerImpl.py | 274 ++++++++++++++---- .../service/RemoteDomainClients.py | 38 ++- src/interdomain/service/Tools.py | 41 ++- .../topology_abstractor/AbstractDevice.py | 51 +++- .../topology_abstractor/AbstractLink.py | 10 + .../topology_abstractor/TopologyAbstractor.py | 59 ++-- .../tests/{test_unitary.py => old_tests.py} | 1 - src/interdomain/tests/test_compute_domains.py | 119 ++++++++ .../tests/test_topology_abstractor.py | 105 +++++++ 10 files changed, 576 insertions(+), 124 deletions(-) rename src/interdomain/tests/{test_unitary.py => old_tests.py} (99%) create mode 100644 src/interdomain/tests/test_compute_domains.py create mode 100644 src/interdomain/tests/test_topology_abstractor.py diff --git a/src/interdomain/Dockerfile b/src/interdomain/Dockerfile index 69fcf3d9c..66c6e938d 100644 --- a/src/interdomain/Dockerfile +++ b/src/interdomain/Dockerfile @@ -68,7 +68,7 @@ COPY src/dlt/. dlt/ COPY src/interdomain/. interdomain/ #COPY src/monitoring/. monitoring/ COPY src/pathcomp/. pathcomp/ -#COPY src/service/. service/ +COPY src/service/. service/ COPY src/slice/. slice/ # Start the service diff --git a/src/interdomain/service/InterdomainServiceServicerImpl.py b/src/interdomain/service/InterdomainServiceServicerImpl.py index 51c8ee39a..fa6bec912 100644 --- a/src/interdomain/service/InterdomainServiceServicerImpl.py +++ b/src/interdomain/service/InterdomainServiceServicerImpl.py @@ -12,25 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Dict, Tuple import grpc, logging, uuid -from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, ServiceNameEnum -from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_missing_environment_variables, get_env_var_name -from common.proto.context_pb2 import AuthenticationResult, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId +from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME, ServiceNameEnum +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_environment_variables, get_env_var_name) +from common.proto.context_pb2 import ( + AuthenticationResult, Empty, EndPointId, Slice, SliceId, SliceStatusEnum, TeraFlowController, TopologyId) from common.proto.interdomain_pb2_grpc import InterdomainServiceServicer from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method +from common.tools.context_queries.CheckType import endpoint_type_is_border from common.tools.context_queries.Context import create_context +from common.tools.context_queries.Device import get_device from common.tools.context_queries.InterDomain import ( - compute_interdomain_path, compute_traversed_domains, get_local_device_uuids, is_inter_domain) -from common.tools.context_queries.Topology import create_topology -from common.tools.grpc.Tools import grpc_message_to_json_string + compute_interdomain_sub_slices, get_local_device_uuids, is_inter_domain) +from common.tools.context_queries.Slice import get_slice_by_id +from common.tools.context_queries.Topology import create_topology, get_topology +from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string +from common.tools.object_factory.Context import json_context_id +from common.tools.object_factory.Device import json_device_id +from common.tools.object_factory.EndPoint import json_endpoint_id from common.tools.object_factory.Topology import json_topology_id from context.client.ContextClient import ContextClient from dlt.connector.client.DltConnectorClient import DltConnectorClient -from interdomain.service.topology_abstractor.DltRecordSender import DltRecordSender from pathcomp.frontend.client.PathCompClient import PathCompClient +from service.client.ServiceClient import ServiceClient from slice.client.SliceClient import SliceClient +from .topology_abstractor.DltRecordSender import DltRecordSender from .RemoteDomainClients import RemoteDomainClients -from .Tools import compose_slice, compute_slice_owner, map_abstract_endpoints_to_real +from .Tools import compose_slice, compute_slice_owner #, map_abstract_endpoints_to_real LOGGER = logging.getLogger(__name__) @@ -57,39 +67,26 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): str_slice = grpc_message_to_json_string(request) raise Exception('InterDomain can only handle inter-domain slice requests: {:s}'.format(str_slice)) - interdomain_path = compute_interdomain_path(pathcomp_client, request) - str_interdomain_path = [ - [device_uuid, [ - (endpoint_id.device_id.device_uuid.uuid, endpoint_id.endpoint_uuid.uuid) - for endpoint_id in endpoint_ids - ]] - for device_uuid, endpoint_ids in interdomain_path - ] - LOGGER.info('interdomain_path={:s}'.format(str(str_interdomain_path))) - - traversed_domains = compute_traversed_domains(context_client, interdomain_path) - str_traversed_domains = [ - (domain_uuid, is_local_domain, [ - (endpoint_id.device_id.device_uuid.uuid, 
endpoint_id.endpoint_uuid.uuid) - for endpoint_id in endpoint_ids - ]) - for domain_uuid,is_local_domain,endpoint_ids in traversed_domains - ] - LOGGER.info('traversed_domains={:s}'.format(str(str_traversed_domains))) - - slice_owner_uuid = compute_slice_owner(context_client, traversed_domains) - LOGGER.info('slice_owner_uuid={:s}'.format(str(slice_owner_uuid))) + local_slices, remote_slices = compute_interdomain_sub_slices( + context_client, pathcomp_client, request) + + traversed_domain_uuids = set() + traversed_domain_uuids.update(local_slices.keys()) + traversed_domain_uuids.update(remote_slices.keys()) + LOGGER.debug('traversed_domain_uuids={:s}'.format(str(traversed_domain_uuids))) + slice_owner_uuid = compute_slice_owner(context_client, traversed_domain_uuids) + LOGGER.debug('slice_owner_uuid={:s}'.format(str(slice_owner_uuid))) if slice_owner_uuid is None: raise Exception('Unable to identify slice owner') reply = Slice() reply.CopyFrom(request) - missing_env_vars = find_missing_environment_variables([ + env_vars = find_environment_variables([ get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) - if len(missing_env_vars) == 0: + if len(env_vars) == 2: # DLT available dlt_connector_client = DltConnectorClient() dlt_connector_client.connect() @@ -98,41 +95,80 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): dlt_record_sender = DltRecordSender(context_client, dlt_connector_client) - for domain_uuid, is_local_domain, endpoint_ids in traversed_domains: - if is_local_domain: + for domain_uuid, endpoint_id_groups in local_slices.items(): + domain_topology = get_topology(context_client, domain_uuid) + if domain_topology is None: raise Exception('Topology({:s}) not found'.format(str(domain_uuid))) + domain_name = domain_topology.name + for endpoint_ids in endpoint_id_groups: slice_uuid = str(uuid.uuid4()) - LOGGER.info('[loop] [local] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( - str(domain_uuid), str(is_local_domain), str(slice_uuid))) + MSG = '[loop] [local] domain_uuid={:s} slice_uuid={:s} endpoint_ids={:s}' + LOGGER.debug(MSG.format(str(domain_uuid), str(slice_uuid), str([ + grpc_message_to_json(ep_id) for ep_id in endpoint_ids + ]))) # local slices always in DEFAULT_CONTEXT_NAME #context_uuid = request.slice_id.context_id.context_uuid.uuid context_uuid = DEFAULT_CONTEXT_NAME - endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids) + #endpoint_ids = map_abstract_endpoints_to_real(context_client, domain_uuid, endpoint_ids) + slice_name = '{:s}:local:{:s}'.format(request.name, domain_name) sub_slice = compose_slice( - context_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, + context_uuid, slice_uuid, endpoint_ids, slice_name=slice_name, constraints=request.slice_constraints, config_rules=request.slice_config.config_rules) - LOGGER.info('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) + LOGGER.debug('[loop] [local] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) sub_slice_id = slice_client.CreateSlice(sub_slice) - else: - slice_uuid = request.slice_id.slice_uuid.uuid - LOGGER.info('[loop] [remote] domain_uuid={:s} is_local_domain={:s} slice_uuid={:s}'.format( - str(domain_uuid), str(is_local_domain), str(slice_uuid))) + + LOGGER.debug('[loop] adding sub-slice') + reply.slice_subslice_ids.add().CopyFrom(sub_slice_id) # pylint: disable=no-member + + for 
domain_uuid, endpoint_id_groups in remote_slices.items(): + domain_topology = get_device(context_client, domain_uuid) + if domain_topology is None: raise Exception('Device({:s}) not found'.format(str(domain_uuid))) + domain_name = domain_topology.name + domain_endpoint_ids_to_names = { + endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.name + for endpoint in domain_topology.device_endpoints + if endpoint_type_is_border(endpoint.endpoint_type) + } + for endpoint_ids in endpoint_id_groups: + slice_uuid = str(uuid.uuid4()) + MSG = '[loop] [remote] domain_uuid={:s} slice_uuid={:s} endpoint_ids={:s}' + LOGGER.debug(MSG.format(str(domain_uuid), str(slice_uuid), str([ + grpc_message_to_json(ep_id) for ep_id in endpoint_ids + ]))) # create context/topology for the remote domains where we are creating slices - create_context(context_client, domain_uuid) + create_context(context_client, domain_uuid, name=domain_name) create_topology(context_client, domain_uuid, DEFAULT_TOPOLOGY_NAME) + create_topology(context_client, domain_uuid, INTERDOMAIN_TOPOLOGY_NAME) + + slice_name = '{:s}:remote:{:s}'.format(request.name, domain_name) + # convert endpoint ids to names to enable conversion to uuids on the remote domain + endpoint_ids = [ + EndPointId(**json_endpoint_id( + json_device_id(domain_name), + domain_endpoint_ids_to_names[endpoint_id.endpoint_uuid.uuid], + topology_id=json_topology_id( + INTERDOMAIN_TOPOLOGY_NAME, + context_id=json_context_id(DEFAULT_CONTEXT_NAME) + ) + )) + for endpoint_id in endpoint_ids + ] sub_slice = compose_slice( - domain_uuid, slice_uuid, endpoint_ids, constraints=request.slice_constraints, - config_rules=request.slice_config.config_rules, owner_uuid=slice_owner_uuid) - LOGGER.info('[loop] [remote] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) + DEFAULT_CONTEXT_NAME, slice_uuid, endpoint_ids, slice_name=slice_name, + constraints=request.slice_constraints, config_rules=request.slice_config.config_rules, + owner_uuid=slice_owner_uuid, owner_string=domain_uuid) + LOGGER.debug('[loop] [remote] sub_slice={:s}'.format(grpc_message_to_json_string(sub_slice))) sub_slice_id = context_client.SetSlice(sub_slice) if dlt_connector_client is not None: topology_id = TopologyId(**json_topology_id(domain_uuid)) dlt_record_sender.add_slice(topology_id, sub_slice) else: - interdomain_client = self.remote_domain_clients.get_peer('remote-teraflow') + interdomain_client = self.remote_domain_clients.get_peer(domain_uuid) + if interdomain_client is None: + raise Exception('InterDomain Client not found for Domain({:s})'.format(str(domain_uuid))) sub_slice_reply = interdomain_client.LookUpSlice(sub_slice) if sub_slice_reply == sub_slice.slice_id: # pylint: disable=no-member # successful case @@ -140,20 +176,23 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): else: # not in catalog remote_sub_slice = interdomain_client.CreateSliceAndAddToCatalog(sub_slice) + + sub_slice.slice_status.slice_status = remote_sub_slice.slice_status.slice_status + context_client.SetSlice(sub_slice) if remote_sub_slice.slice_status.slice_status != SliceStatusEnum.SLICESTATUS_ACTIVE: raise Exception('Remote Slice creation failed. 
Wrong Slice status returned') - LOGGER.info('[loop] adding sub-slice') - reply.slice_subslice_ids.add().CopyFrom(sub_slice_id) # pylint: disable=no-member + LOGGER.debug('[loop] adding sub-slice') + reply.slice_subslice_ids.add().CopyFrom(sub_slice_id) # pylint: disable=no-member if dlt_connector_client is not None: - LOGGER.info('Recording Remote Slice requests to DLT') + LOGGER.debug('Recording Remote Slice requests to DLT') dlt_record_sender.commit() - LOGGER.info('Activating interdomain slice') + LOGGER.debug('Activating interdomain slice') reply.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_ACTIVE # pylint: disable=no-member - LOGGER.info('Updating interdomain slice') + LOGGER.debug('Updating interdomain slice') slice_id = context_client.SetSlice(reply) return slice_id @@ -168,22 +207,133 @@ class InterdomainServiceServicerImpl(InterdomainServiceServicer): def LookUpSlice(self, request : Slice, context : grpc.ServicerContext) -> SliceId: try: context_client = ContextClient() - slice_ = context_client.GetSlice(request.slice_id) + slice_id = SliceId() + slice_id.CopyFrom(request.slice_id) + slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + slice_ = context_client.GetSlice(slice_id) return slice_.slice_id except grpc.RpcError: #LOGGER.exception('Unable to get slice({:s})'.format(grpc_message_to_json_string(request.slice_id))) return SliceId() @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def OrderSliceFromCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: - raise NotImplementedError('OrderSliceFromCatalog') - #return Slice() + def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: + context_client = ContextClient() + slice_client = SliceClient() + _request = Slice() + _request.CopyFrom(request) + _request.slice_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + + #admin_context = context_client.GetContext(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) + #admin_context_uuid = admin_context.context_id.context_uuid.uuid + #admin_context_name = admin_context.name + + #interdomain_topology = context_client.GetTopology(TopologyId(**json_topology_id( + # DEFAULT_TOPOLOGY_NAME, context_id=json_context_id(DEFAULT_CONTEXT_NAME) + #))) + #interdomain_topology_uuid = interdomain_topology.topology_id.topology_uuid.uuid + #interdomain_topology_name = interdomain_topology.name + + devices = context_client.ListDevices(Empty()) + interdomain_endpoint_map : Dict[str, Tuple[str, str, str, str]] = dict() + for device in devices.devices: + device_uuid = device.device_id.device_uuid.uuid + device_name = device.name + for endpoint in device.device_endpoints: + if not endpoint_type_is_border(endpoint.endpoint_type): continue + #endpoint_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid + #if endpoint_context_uuid not in {admin_context_uuid, admin_context_name}: continue + #endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid + #if endpoint_topology_uuid not in {interdomain_topology_uuid, interdomain_topology_name}: continue + endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid + endpoint_name = endpoint.name + interdomain_endpoint_map[endpoint_name] = (device_uuid, device_name, endpoint_uuid, endpoint_name) + LOGGER.debug('interdomain_endpoint_map={:s}'.format(str(interdomain_endpoint_map))) + + # Map endpoints to local real counterparts + del _request.slice_endpoint_ids[:] + for endpoint_id in request.slice_endpoint_ids: + #endpoint_context_uuid = 
endpoint_id.topology_id.context_id.context_uuid.uuid + #if endpoint_context_uuid not in {admin_context_uuid, admin_context_name}: + # MSG = 'Unexpected ContextId in EndPointId({:s})' + # raise Exception(MSG.format(grpc_message_to_json_string(endpoint_id))) + + #endpoint_topology_uuid = endpoint_id.topology_id.topology_uuid.uuid + #if endpoint_topology_uuid not in {admin_topology_uuid, admin_topology_name}: + # MSG = 'Unexpected TopologyId in EndPointId({:s})' + # raise Exception(MSG.format(grpc_message_to_json_string(endpoint_id))) + + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + real_endpoint = interdomain_endpoint_map.get(endpoint_uuid) + if real_endpoint is None: + MSG = 'Unable to map EndPointId({:s}) to real endpoint. interdomain_endpoint_map={:s}' + raise Exception(MSG.format(grpc_message_to_json_string(endpoint_id), str(interdomain_endpoint_map))) + real_device_uuid, _, real_endpoint_uuid, _ = real_endpoint + + real_endpoint_id = _request.slice_endpoint_ids.add() + real_endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME + real_endpoint_id.topology_id.topology_uuid.uuid = DEFAULT_TOPOLOGY_NAME + real_endpoint_id.device_id.device_uuid.uuid = real_device_uuid + real_endpoint_id.endpoint_uuid.uuid = real_endpoint_uuid + + slice_id = slice_client.CreateSlice(_request) + return context_client.GetSlice(slice_id) @safe_and_metered_rpc_method(METRICS_POOL, LOGGER) - def CreateSliceAndAddToCatalog(self, request : Slice, context : grpc.ServicerContext) -> Slice: + def DeleteSlice(self, request : SliceId, context : grpc.ServicerContext) -> Empty: context_client = ContextClient() + try: + _slice = context_client.GetSlice(request) + except: # pylint: disable=bare-except + context_client.close() + return Empty() + + _slice_rw = Slice() + _slice_rw.CopyFrom(_slice) + _slice_rw.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_DEINIT # pylint: disable=no-member + context_client.SetSlice(_slice_rw) + + local_device_uuids = get_local_device_uuids(context_client) + slice_owner_uuid = _slice.slice_owner.owner_uuid.uuid + not_inter_domain = not is_inter_domain(context_client, _slice.slice_endpoint_ids) + no_slice_owner = len(slice_owner_uuid) == 0 + is_local_slice_owner = slice_owner_uuid in local_device_uuids + if not_inter_domain and (no_slice_owner or is_local_slice_owner): + str_slice = grpc_message_to_json_string(_slice) + raise Exception('InterDomain can only handle inter-domain slice requests: {:s}'.format(str_slice)) + slice_client = SliceClient() - reply = slice_client.CreateSlice(request) - if reply != request.slice_id: - raise Exception('Slice creation failed. 
Wrong Slice Id was returned') - return context_client.GetSlice(request.slice_id) + for subslice_id in _slice_rw.slice_subslice_ids: + sub_slice = get_slice_by_id(context_client, subslice_id, rw_copy=True) + if ':remote:' in sub_slice.name: + domain_uuid = sub_slice.slice_owner.owner_string + interdomain_client = self.remote_domain_clients.get_peer(domain_uuid) + if interdomain_client is None: + raise Exception('InterDomain Client not found for Domain({:s})'.format(str(domain_uuid))) + interdomain_client.DeleteSlice(subslice_id) + + tmp_slice = Slice() + tmp_slice.slice_id.CopyFrom(_slice_rw.slice_id) # pylint: disable=no-member + slice_subslice_id = tmp_slice.slice_subslice_ids.add() # pylint: disable=no-member + slice_subslice_id.CopyFrom(subslice_id) + context_client.UnsetSlice(tmp_slice) + + if ':remote:' in sub_slice.name: + context_client.RemoveSlice(subslice_id) + else: + slice_client.DeleteSlice(subslice_id) + + service_client = ServiceClient() + for service_id in _slice_rw.slice_service_ids: + tmp_slice = Slice() + tmp_slice.slice_id.CopyFrom(_slice_rw.slice_id) # pylint: disable=no-member + slice_service_id = tmp_slice.slice_service_ids.add() # pylint: disable=no-member + slice_service_id.CopyFrom(service_id) + context_client.UnsetSlice(tmp_slice) + service_client.DeleteService(service_id) + + context_client.RemoveSlice(request) + slice_client.close() + service_client.close() + context_client.close() + return Empty() diff --git a/src/interdomain/service/RemoteDomainClients.py b/src/interdomain/service/RemoteDomainClients.py index d60450a18..adc6fe52b 100644 --- a/src/interdomain/service/RemoteDomainClients.py +++ b/src/interdomain/service/RemoteDomainClients.py @@ -27,12 +27,13 @@ from interdomain.client.InterdomainClient import InterdomainClient LOGGER = logging.getLogger(__name__) -def get_domain_data(context_client : ContextClient, event : DeviceEvent) -> Optional[Tuple[str, str, int]]: +def get_domain_data(context_client : ContextClient, event : DeviceEvent) -> Optional[Tuple[str, str, str, int]]: device_uuid = event.device_id.device_uuid.uuid device = get_device( context_client, device_uuid, include_endpoints=False, include_components=False, include_config_rules=True) if device.device_type != DeviceTypeEnum.NETWORK.value: return None + idc_domain_uuid = device_uuid idc_domain_name = device.name idc_domain_address = None idc_domain_port = None @@ -45,7 +46,7 @@ def get_domain_data(context_client : ContextClient, event : DeviceEvent) -> Opti idc_domain_port = int(config_rule.custom.resource_value) if idc_domain_address is None: return None if idc_domain_port is None: return None - return idc_domain_name, idc_domain_address, idc_domain_port + return idc_domain_uuid, idc_domain_name, idc_domain_address, idc_domain_port class RemoteDomainClients(threading.Thread): def __init__(self) -> None: @@ -67,21 +68,22 @@ class RemoteDomainClients(threading.Thread): event = self.context_event_collector.get_event(timeout=0.1) if event is None: continue if not isinstance(event, DeviceEvent): continue - LOGGER.info('Processing Event({:s})...'.format(grpc_message_to_json_string(event))) + LOGGER.info('Processing DeviceEvent({:s})...'.format(grpc_message_to_json_string(event))) domain_data = get_domain_data(self.context_client, event) if domain_data is None: continue - domain_name, domain_address, domain_port = domain_data + domain_uuid, domain_name, domain_address, domain_port = domain_data try: - self.add_peer(domain_name, domain_address, domain_port) + self.add_peer(domain_uuid, domain_name, 
domain_address, domain_port) except: # pylint: disable=bare-except - MSG = 'Unable to connect to remote domain {:s} ({:s}:{:d})' - LOGGER.exception(MSG.format(domain_name, domain_address, domain_port)) + MSG = 'Unable to connect to remote domain {:s} {:s} ({:s}:{:d})' + LOGGER.exception(MSG.format(domain_uuid, domain_name, domain_address, domain_port)) self.context_event_collector.stop() self.context_client.close() def add_peer( - self, domain_name : str, domain_address : str, domain_port : int, context_uuid : str = DEFAULT_CONTEXT_NAME + self, domain_uuid : str, domain_name : str, domain_address : str, domain_port : int, + context_uuid : str = DEFAULT_CONTEXT_NAME ) -> None: request = TeraFlowController() request.context_id.context_uuid.uuid = context_uuid # pylint: disable=no-member @@ -96,18 +98,22 @@ class RemoteDomainClients(threading.Thread): if not reply.authenticated: MSG = 'Authentication against {:s}:{:d} with Context({:s}) rejected' # pylint: disable=broad-exception-raised - raise Exception(MSG.format(domain_address, domain_port, domain_name)) + raise Exception(MSG.format(domain_address, domain_port, context_uuid)) with self.lock: + self.peer_domains[domain_uuid] = interdomain_client self.peer_domains[domain_name] = interdomain_client - LOGGER.info('Added peer domain {:s} ({:s}:{:d})'.format(domain_name, domain_address, domain_port)) + MSG = 'Added peer domain {:s} {:s} ({:s}:{:d})' + LOGGER.info(MSG.format(domain_uuid, domain_name, domain_address, domain_port)) - def get_peer(self, domain_name : str) -> InterdomainClient: + def get_peer(self, domain_uuid_or_name : str) -> Optional[InterdomainClient]: with self.lock: - LOGGER.warning('peers: {:s}'.format(str(self.peer_domains))) - return self.peer_domains.get(domain_name) + LOGGER.debug('domain_uuid_or_name: {:s}'.format(str(domain_uuid_or_name))) + LOGGER.debug('peers: {:s}'.format(str(self.peer_domains))) + return self.peer_domains.get(domain_uuid_or_name) - def remove_peer(self, domain_name : str) -> None: + def remove_peer(self, domain_uuid_or_name : str) -> None: with self.lock: - self.peer_domains.pop(domain_name, None) - LOGGER.info('Removed peer domain {:s}'.format(domain_name)) + LOGGER.debug('domain_uuid_or_name: {:s}'.format(str(domain_uuid_or_name))) + self.peer_domains.pop(domain_uuid_or_name, None) + LOGGER.info('Removed peer domain {:s}'.format(domain_uuid_or_name)) diff --git a/src/interdomain/service/Tools.py b/src/interdomain/service/Tools.py index 94db60ed2..1c8fd90f1 100644 --- a/src/interdomain/service/Tools.py +++ b/src/interdomain/service/Tools.py @@ -13,10 +13,11 @@ # limitations under the License. 
import json, logging -from typing import List, Optional, Tuple +from typing import List, Optional, Set from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME +from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( - ConfigRule, Constraint, ContextId, Device, Empty, EndPointId, Slice, SliceStatusEnum) + ConfigRule, Constraint, ContextId, Empty, EndPointId, Slice, SliceStatusEnum) from common.tools.context_queries.CheckType import device_type_is_network, endpoint_type_is_border from common.tools.context_queries.InterDomain import get_local_device_uuids from common.tools.grpc.ConfigRules import copy_config_rules @@ -28,27 +29,32 @@ from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) def compute_slice_owner( - context_client : ContextClient, traversed_domains : List[Tuple[str, Device, bool, List[EndPointId]]] + context_client : ContextClient, traversed_domain_uuids : Set[str] ) -> Optional[str]: - traversed_domain_uuids = {traversed_domain[0] for traversed_domain in traversed_domains} - existing_topologies = context_client.ListTopologies(ContextId(**json_context_id(DEFAULT_CONTEXT_NAME))) - existing_topology_uuids_names = set() + domain_uuids_names = set() DISCARD_TOPOLOGY_NAMES = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME} for topology in existing_topologies.topologies: topology_uuid = topology.topology_id.topology_uuid.uuid if topology_uuid in DISCARD_TOPOLOGY_NAMES: continue topology_name = topology.name if topology_name in DISCARD_TOPOLOGY_NAMES: continue - existing_topology_uuids_names.add(topology_uuid) - existing_topology_uuids_names.add(topology_name) + domain_uuids_names.add(topology_uuid) + domain_uuids_names.add(topology_name) + + for topology in existing_topologies.topologies: + topology_details = context_client.GetTopologyDetails(topology.topology_id) + for device in topology_details.devices: + if device.device_type != DeviceTypeEnum.NETWORK.value: continue + domain_uuids_names.discard(device.device_id.device_uuid.uuid) + domain_uuids_names.discard(device.name) - candidate_owner_uuids = traversed_domain_uuids.intersection(existing_topology_uuids_names) + candidate_owner_uuids = traversed_domain_uuids.intersection(domain_uuids_names) if len(candidate_owner_uuids) != 1: data = { - 'traversed_domain_uuids' : [td_uuid for td_uuid in traversed_domain_uuids ], - 'existing_topology_uuids_names': [et_uuid for et_uuid in existing_topology_uuids_names], - 'candidate_owner_uuids' : [co_uuid for co_uuid in candidate_owner_uuids ], + 'traversed_domain_uuids': [td_uuid for td_uuid in traversed_domain_uuids], + 'domain_uuids_names' : [et_uuid for et_uuid in domain_uuids_names ], + 'candidate_owner_uuids' : [co_uuid for co_uuid in candidate_owner_uuids ], } LOGGER.warning('Unable to identify slice owner: {:s}'.format(json.dumps(data))) return None @@ -56,17 +62,24 @@ def compute_slice_owner( return candidate_owner_uuids.pop() def compose_slice( - context_uuid : str, slice_uuid : str, endpoint_ids : List[EndPointId], constraints : List[Constraint] = [], - config_rules : List[ConfigRule] = [], owner_uuid : Optional[str] = None + context_uuid : str, slice_uuid : str, endpoint_ids : List[EndPointId], slice_name : Optional[str] = None, + constraints : List[Constraint] = [], config_rules : List[ConfigRule] = [], owner_uuid : Optional[str] = None, + owner_string : Optional[str] = None ) -> Slice: slice_ = Slice() slice_.slice_id.context_id.context_uuid.uuid = context_uuid # pylint: 
disable=no-member slice_.slice_id.slice_uuid.uuid = slice_uuid # pylint: disable=no-member slice_.slice_status.slice_status = SliceStatusEnum.SLICESTATUS_PLANNED # pylint: disable=no-member + if slice_name is not None: + slice_.name = slice_name + if owner_uuid is not None: slice_.slice_owner.owner_uuid.uuid = owner_uuid # pylint: disable=no-member + if owner_string is not None: + slice_.slice_owner.owner_string = owner_string # pylint: disable=no-member + if len(endpoint_ids) >= 2: slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[0]) # pylint: disable=no-member slice_.slice_endpoint_ids.add().CopyFrom(endpoint_ids[-1]) # pylint: disable=no-member diff --git a/src/interdomain/service/topology_abstractor/AbstractDevice.py b/src/interdomain/service/topology_abstractor/AbstractDevice.py index 0de93daa8..47832acc0 100644 --- a/src/interdomain/service/topology_abstractor/AbstractDevice.py +++ b/src/interdomain/service/topology_abstractor/AbstractDevice.py @@ -24,13 +24,15 @@ from common.tools.context_queries.Device import add_device_to_topology, get_exis from common.tools.object_factory.Context import json_context_id from common.tools.object_factory.Device import json_device, json_device_id from context.client.ContextClient import ContextClient +from context.service.database.uuids.EndPoint import endpoint_get_uuid LOGGER = logging.getLogger(__name__) class AbstractDevice: - def __init__(self, device_uuid : str, device_type : DeviceTypeEnum): + def __init__(self, device_uuid : str, device_name : str, device_type : DeviceTypeEnum): self.__context_client = ContextClient() self.__device_uuid : str = device_uuid + self.__device_name : str = device_name self.__device_type : DeviceTypeEnum = device_type self.__device : Optional[Device] = None self.__device_id : Optional[DeviceId] = None @@ -41,9 +43,23 @@ class AbstractDevice: # Dict[endpoint_uuid, device_uuid] self.__abstract_endpoint_to_device : Dict[str, str] = dict() + def to_json(self) -> Dict: + return { + 'device_uuid' : self.__device_uuid, + 'device_name' : self.__device_name, + 'device_type' : self.__device_type, + 'device' : self.__device, + 'device_id' : self.__device_id, + 'device_endpoint_to_abstract' : self.__device_endpoint_to_abstract, + 'abstract_endpoint_to_device' : self.__abstract_endpoint_to_device, + } + @property def uuid(self) -> str: return self.__device_uuid + @property + def name(self) -> str: return self.__device_name + @property def device_id(self) -> Optional[DeviceId]: return self.__device_id @@ -92,7 +108,7 @@ class AbstractDevice: device = Device(**json_device( device_uuid, self.__device_type.value, DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_ENABLED, - endpoints=[], config_rules=[], drivers=[DeviceDriverEnum.DEVICEDRIVER_UNDEFINED] + name=self.__device_name, endpoints=[], config_rules=[], drivers=[DeviceDriverEnum.DEVICEDRIVER_UNDEFINED] )) self.__context_client.SetDevice(device) self.__device = device @@ -126,6 +142,14 @@ class AbstractDevice: self.__abstract_endpoint_to_device\ .setdefault(endpoint_uuid, device_uuid) + def _update_endpoint_name(self, device_uuid : str, endpoint_uuid : str, endpoint_name : str) -> bool: + device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {}) + interdomain_endpoint = device_endpoint_to_abstract.get(endpoint_uuid) + interdomain_endpoint_name = interdomain_endpoint.name + if endpoint_name == interdomain_endpoint_name: return False + interdomain_endpoint.name = endpoint_name + return True + def _update_endpoint_type(self, device_uuid : str, 
endpoint_uuid : str, endpoint_type : str) -> bool: device_endpoint_to_abstract = self.__device_endpoint_to_abstract.get(device_uuid, {}) interdomain_endpoint = device_endpoint_to_abstract.get(endpoint_uuid) @@ -134,16 +158,24 @@ class AbstractDevice: interdomain_endpoint.endpoint_type = endpoint_type return True - def _add_endpoint(self, device_uuid : str, endpoint_uuid : str, endpoint_type : str) -> EndPoint: + def _add_endpoint( + self, device_uuid : str, endpoint_uuid : str, endpoint_name : str, endpoint_type : str + ) -> EndPoint: interdomain_endpoint = self.__device.device_endpoints.add() + interdomain_endpoint.endpoint_id.topology_id.topology_uuid.uuid = INTERDOMAIN_TOPOLOGY_NAME + interdomain_endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME interdomain_endpoint.endpoint_id.device_id.CopyFrom(self.__device_id) - interdomain_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_uuid + interdomain_endpoint.endpoint_id.endpoint_uuid.uuid = endpoint_name + interdomain_endpoint.name = endpoint_name interdomain_endpoint.endpoint_type = endpoint_type + uuids = endpoint_get_uuid(interdomain_endpoint.endpoint_id, endpoint_name=endpoint_name, allow_random=False) + _, _, interdomain_endpoint_uuid = uuids + self.__device_endpoint_to_abstract\ .setdefault(device_uuid, {}).setdefault(endpoint_uuid, interdomain_endpoint) self.__abstract_endpoint_to_device\ - .setdefault(endpoint_uuid, device_uuid) + .setdefault(interdomain_endpoint_uuid, device_uuid) return interdomain_endpoint @@ -160,7 +192,7 @@ class AbstractDevice: device_uuid = device.device_id.device_uuid.uuid device_border_endpoint_uuids = { - endpoint.endpoint_id.endpoint_uuid.uuid : endpoint.endpoint_type + endpoint.endpoint_id.endpoint_uuid.uuid : (endpoint.name, endpoint.endpoint_type) for endpoint in device.device_endpoints if endpoint_type_is_border(endpoint.endpoint_type) } @@ -177,14 +209,15 @@ class AbstractDevice: updated = True # for each border endpoint in device that is not in abstract device; add to abstract device - for endpoint_uuid,endpoint_type in device_border_endpoint_uuids.items(): - # if already added; just check endpoint type is not modified + for endpoint_uuid,(endpoint_name, endpoint_type) in device_border_endpoint_uuids.items(): + # if already added; just check endpoint name and type are not modified if endpoint_uuid in self.__abstract_endpoint_to_device: + updated = updated or self._update_endpoint_name(device_uuid, endpoint_uuid, endpoint_name) updated = updated or self._update_endpoint_type(device_uuid, endpoint_uuid, endpoint_type) continue # otherwise, add it to the abstract device - self._add_endpoint(device_uuid, endpoint_uuid, endpoint_type) + self._add_endpoint(device_uuid, endpoint_uuid, endpoint_name, endpoint_type) updated = True return updated diff --git a/src/interdomain/service/topology_abstractor/AbstractLink.py b/src/interdomain/service/topology_abstractor/AbstractLink.py index bdab62476..76b2a0311 100644 --- a/src/interdomain/service/topology_abstractor/AbstractLink.py +++ b/src/interdomain/service/topology_abstractor/AbstractLink.py @@ -33,6 +33,14 @@ class AbstractLink: # Dict[(device_uuid, endpoint_uuid), abstract EndPointId] self.__device_endpoint_to_abstract : Dict[Tuple[str, str], EndPointId] = dict() + def to_json(self) -> Dict: + return { + 'link_uuid' : self.__link_uuid, + 'link' : self.__link, + 'link_id' : self.__link_id, + 'device_endpoint_to_abstract' : self.__device_endpoint_to_abstract, + } + @property def uuid(self) -> str: return self.__link_uuid @@ 
-95,6 +103,8 @@ class AbstractLink: def _add_endpoint(self, device_uuid : str, endpoint_uuid : str) -> None: endpoint_id = self.__link.link_endpoint_ids.add() + endpoint_id.topology_id.topology_uuid.uuid = INTERDOMAIN_TOPOLOGY_NAME + endpoint_id.topology_id.context_id.context_uuid.uuid = DEFAULT_CONTEXT_NAME endpoint_id.device_id.device_uuid.uuid = device_uuid endpoint_id.endpoint_uuid.uuid = endpoint_uuid self.__device_endpoint_to_abstract.setdefault((device_uuid, endpoint_uuid), endpoint_id) diff --git a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py index 40b40ac66..0d9faa040 100644 --- a/src/interdomain/service/topology_abstractor/TopologyAbstractor.py +++ b/src/interdomain/service/topology_abstractor/TopologyAbstractor.py @@ -16,15 +16,16 @@ import logging, threading from typing import Dict, Optional, Tuple from common.Constants import DEFAULT_CONTEXT_NAME, DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME, ServiceNameEnum from common.DeviceTypes import DeviceTypeEnum -from common.Settings import ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_missing_environment_variables, get_env_var_name +from common.Settings import ( + ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, find_environment_variables, get_env_var_name) from common.proto.context_pb2 import ( ContextEvent, ContextId, Device, DeviceEvent, DeviceId, EndPoint, EndPointId, Link, LinkEvent, TopologyId, TopologyEvent) from common.tools.context_queries.CheckType import ( device_type_is_datacenter, device_type_is_network, endpoint_type_is_border) from common.tools.context_queries.Context import create_context -from common.tools.context_queries.Device import get_devices_in_topology, get_uuids_of_devices_in_topology -from common.tools.context_queries.Link import get_links_in_topology +from common.tools.context_queries.Device import get_uuids_of_devices_in_topology #, get_devices_in_topology +#from common.tools.context_queries.Link import get_links_in_topology from common.tools.context_queries.Topology import create_missing_topologies from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.object_factory.Context import json_context_id @@ -99,13 +100,13 @@ class TopologyAbstractor(threading.Thread): # return False def _get_or_create_abstract_device( - self, device_uuid : str, device_type : DeviceTypeEnum, dlt_record_sender : DltRecordSender, + self, device_uuid : str, device_name : str, device_type : DeviceTypeEnum, dlt_record_sender : DltRecordSender, abstract_topology_id : TopologyId ) -> AbstractDevice: abstract_device = self.abstract_devices.get(device_uuid) changed = False if abstract_device is None: - abstract_device = AbstractDevice(device_uuid, device_type) + abstract_device = AbstractDevice(device_uuid, device_name, device_type) changed = abstract_device.initialize() if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device) self.abstract_devices[device_uuid] = abstract_device @@ -117,16 +118,17 @@ class TopologyAbstractor(threading.Thread): abstract_device_uuid : Optional[str] = None ) -> None: device_uuid = device.device_id.device_uuid.uuid + device_name = device.name if device_type_is_datacenter(device.device_type): abstract_device_uuid = device_uuid abstract_device = self._get_or_create_abstract_device( - device_uuid, DeviceTypeEnum.EMULATED_DATACENTER, dlt_record_sender, abstract_topology_id) + device_uuid, device_name, DeviceTypeEnum.EMULATED_DATACENTER, 
dlt_record_sender, abstract_topology_id) elif device_type_is_network(device.device_type): LOGGER.warning('device_type is network; not implemented') return else: abstract_device = self._get_or_create_abstract_device( - abstract_device_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) + abstract_device_uuid, None, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) self.real_to_abstract_device_uuid[device_uuid] = abstract_device_uuid changed = abstract_device.update_endpoints(device) if changed: dlt_record_sender.add_device(abstract_topology_id, abstract_device.device) @@ -224,11 +226,11 @@ class TopologyAbstractor(threading.Thread): if changed: dlt_record_sender.add_link(INTERDOMAIN_TOPOLOGY_ID, abstract_link.link) def update_abstraction(self, event : EventTypes) -> None: - missing_env_vars = find_missing_environment_variables([ + env_vars = find_environment_variables([ get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_HOST ), get_env_var_name(ServiceNameEnum.DLT, ENVVAR_SUFIX_SERVICE_PORT_GRPC), ]) - if len(missing_env_vars) == 0: + if len(env_vars) == 2: # DLT available dlt_connector_client = DltConnectorClient() dlt_connector_client.connect() @@ -238,41 +240,55 @@ class TopologyAbstractor(threading.Thread): dlt_record_sender = DltRecordSender(self.context_client, dlt_connector_client) if isinstance(event, ContextEvent): - LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + LOGGER.debug('Processing ContextEvent({:s})'.format(grpc_message_to_json_string(event))) + LOGGER.warning('Ignoring ContextEvent({:s})'.format(grpc_message_to_json_string(event))) elif isinstance(event, TopologyEvent): + LOGGER.debug('Processing TopologyEvent({:s})'.format(grpc_message_to_json_string(event))) topology_id = event.topology_id topology_uuid = topology_id.topology_uuid.uuid context_id = topology_id.context_id context_uuid = context_id.context_uuid.uuid topology_uuids = {DEFAULT_TOPOLOGY_NAME, INTERDOMAIN_TOPOLOGY_NAME} - if (context_uuid == DEFAULT_CONTEXT_NAME) and (topology_uuid not in topology_uuids): + + context = self.context_client.GetContext(context_id) + context_name = context.name + + topology_details = self.context_client.GetTopologyDetails(topology_id) + topology_name = topology_details.name + + if ((context_uuid == DEFAULT_CONTEXT_NAME) or (context_name == DEFAULT_CONTEXT_NAME)) and \ + (topology_uuid not in topology_uuids) and (topology_name not in topology_uuids): + abstract_topology_id = TopologyId(**json_topology_id(topology_uuid, context_id=ADMIN_CONTEXT_ID)) self._get_or_create_abstract_device( - topology_uuid, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) + topology_uuid, topology_name, DeviceTypeEnum.NETWORK, dlt_record_sender, abstract_topology_id) - devices = get_devices_in_topology(self.context_client, context_id, topology_uuid) - for device in devices: + #devices = get_devices_in_topology(self.context_client, context_id, topology_uuid) + for device in topology_details.devices: self._update_abstract_device( device, dlt_record_sender, abstract_topology_id, abstract_device_uuid=topology_uuid) - links = get_links_in_topology(self.context_client, context_id, topology_uuid) - for link in links: + #links = get_links_in_topology(self.context_client, context_id, topology_uuid) + for link in topology_details.links: self._update_abstract_link(link, dlt_record_sender, abstract_topology_id) - for device in devices: + for device in topology_details.devices: self._infer_abstract_links(device, 
dlt_record_sender) else: - LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) - + MSG = 'Ignoring ({:s}/{:s})({:s}/{:s}) TopologyEvent({:s})' + args = context_uuid, context_name, topology_uuid, topology_name, grpc_message_to_json_string(event) + LOGGER.warning(MSG.format(*args)) + elif isinstance(event, DeviceEvent): + LOGGER.debug('Processing DeviceEvent({:s})'.format(grpc_message_to_json_string(event))) device_id = event.device_id device_uuid = device_id.device_uuid.uuid abstract_device_uuid = self.real_to_abstract_device_uuid.get(device_uuid) device = self.context_client.GetDevice(device_id) if abstract_device_uuid is None: - LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + LOGGER.warning('Ignoring DeviceEvent({:s})'.format(grpc_message_to_json_string(event))) else: abstract_topology_id = self.abstract_device_to_topology_id[abstract_device_uuid] self._update_abstract_device( @@ -281,11 +297,12 @@ class TopologyAbstractor(threading.Thread): self._infer_abstract_links(device, dlt_record_sender) elif isinstance(event, LinkEvent): + LOGGER.debug('Processing LinkEvent({:s})'.format(grpc_message_to_json_string(event))) link_id = event.link_id link_uuid = link_id.link_uuid.uuid abstract_link_uuid = self.real_to_abstract_link_uuid.get(link_uuid) if abstract_link_uuid is None: - LOGGER.warning('Ignoring Event({:s})'.format(grpc_message_to_json_string(event))) + LOGGER.warning('Ignoring LinkEvent({:s})'.format(grpc_message_to_json_string(event))) else: abstract_topology_id = self.abstract_link_to_topology_id[abstract_link_uuid] link = self.context_client.GetLink(link_id) diff --git a/src/interdomain/tests/test_unitary.py b/src/interdomain/tests/old_tests.py similarity index 99% rename from src/interdomain/tests/test_unitary.py rename to src/interdomain/tests/old_tests.py index 403dea543..3543c9541 100644 --- a/src/interdomain/tests/test_unitary.py +++ b/src/interdomain/tests/old_tests.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. - #import logging, grpc #import os #import sqlite3 diff --git a/src/interdomain/tests/test_compute_domains.py b/src/interdomain/tests/test_compute_domains.py new file mode 100644 index 000000000..3332731dd --- /dev/null +++ b/src/interdomain/tests/test_compute_domains.py @@ -0,0 +1,119 @@ +# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging, pytest
+from typing import Dict, List, Tuple
+from common.DeviceTypes import DeviceTypeEnum
+from common.proto.context_pb2 import EndPointId
+from common.proto.pathcomp_pb2 import PathCompRequest
+from common.tools.context_queries.Device import get_device
+from common.tools.context_queries.InterDomain import get_device_to_domain_map, get_local_device_uuids
+from common.tools.grpc.Tools import grpc_message_list_to_json, grpc_message_list_to_json_string, grpc_message_to_json_string
+from context.client.ContextClient import ContextClient
+from pathcomp.frontend.client.PathCompClient import PathCompClient
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def pathcomp_client():
+    _client = PathCompClient()
+    yield _client
+    _client.close()
+
+def test_interdomain_topology_abstractor(
+    context_client : ContextClient,     # pylint: disable=redefined-outer-name
+    pathcomp_client : PathCompClient,   # pylint: disable=redefined-outer-name
+) -> None:
+
+    pathcomp_req = PathCompRequest(**{
+        "services": [
+            {"name": "", "service_constraints": [{"sla_capacity": {"capacity_gbps": 10.0}}, {"sla_latency": {"e2e_latency_ms": 100.0}}], "service_endpoint_ids": [
+                {"device_id": {"device_uuid": {"uuid": "cda90d2f-e7b0-5837-8f2e-2fb29dd9b367"}}, "endpoint_uuid": {"uuid": "37ab67ef-0064-54e3-ae9b-d40100953834"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}},
+                {"device_id": {"device_uuid": {"uuid": "800d5bd4-a7a3-5a66-82ab-d399767ca3d8"}}, "endpoint_uuid": {"uuid": "97f57787-cfec-5315-9718-7e850905f11a"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}
+            ], "service_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "service_uuid": {"uuid": "77277b43-f9cd-5e01-a3e7-6c5fa4577137"}}, "service_type": "SERVICETYPE_L2NM"}
+        ],
+        "shortest_path": {}
+    })
+    pathcomp_req_svc = pathcomp_req.services[0]
+
+    pathcomp_rep = pathcomp_client.Compute(pathcomp_req)
+    LOGGER.warning('pathcomp_rep = {:s}'.format(grpc_message_to_json_string(pathcomp_rep)))
+
+    num_services = len(pathcomp_rep.services)
+    if num_services == 0:
+        raise Exception('No services received : {:s}'.format(grpc_message_to_json_string(pathcomp_rep)))
+
+    num_connections = len(pathcomp_rep.connections)
+    if num_connections == 0:
+        raise Exception('No connections received : {:s}'.format(grpc_message_to_json_string(pathcomp_rep)))
+
+    local_device_uuids = get_local_device_uuids(context_client)
+    LOGGER.warning('local_device_uuids={:s}'.format(str(local_device_uuids)))
+
+    device_to_domain_map = get_device_to_domain_map(context_client)
+    LOGGER.warning('device_to_domain_map={:s}'.format(str(device_to_domain_map)))
+
+    local_slices  : List[List[EndPointId]] = list()
+    remote_slices : List[List[EndPointId]] = list()
+    req_service_uuid = pathcomp_req_svc.service_id.service_uuid.uuid
+    for service in pathcomp_rep.services:
+        service_uuid = service.service_id.service_uuid.uuid
+        if service_uuid == req_service_uuid: continue # main synthetic service; we don't care
+        device_uuids = {
+            endpoint_id.device_id.device_uuid.uuid
+            for endpoint_id in service.service_endpoint_ids
+        }
+        local_domain_uuids = set()
+        remote_domain_uuids = set()
+        for device_uuid in device_uuids:
+            if device_uuid in local_device_uuids:
+                domain_uuid = device_to_domain_map.get(device_uuid)
+                if domain_uuid is None:
+                    raise Exception('Unable to map device({:s}) to a domain'.format(str(device_uuid)))
+                local_domain_uuids.add(domain_uuid)
+            else:
+                device = get_device(
+                    context_client, device_uuid, include_endpoints=True, include_config_rules=False,
+                    include_components=False)
+                if device is None: raise Exception('Device({:s}) not found'.format(str(device_uuid)))
+                device_type = DeviceTypeEnum._value2member_map_.get(device.device_type)
+                is_remote = device_type == DeviceTypeEnum.NETWORK
+                if not is_remote:
+                    MSG = 'Weird device({:s}) is not local and not network'
+                    raise Exception(MSG.format(grpc_message_to_json_string(device)))
+                remote_domain_uuids.add(device_uuid)
+        is_local = len(local_domain_uuids) > 0
+        is_remote = len(remote_domain_uuids) > 0
+        if is_local == is_remote:
+            MSG = 'Weird service combines local and remote devices: {:s}'
+            raise Exception(MSG.format(grpc_message_to_json_string(service)))
+        elif is_local:
+            local_slices.append(service.service_endpoint_ids)
+        else:
+            remote_slices.append(service.service_endpoint_ids)
+
+    str_local_slices = [grpc_message_list_to_json(endpoint_ids) for endpoint_ids in local_slices]
+    LOGGER.warning('local_slices={:s}'.format(str(str_local_slices)))
+
+    str_remote_slices = [grpc_message_list_to_json(endpoint_ids) for endpoint_ids in remote_slices]
+    LOGGER.warning('remote_slices={:s}'.format(str(str_remote_slices)))
+
+    raise Exception()
diff --git a/src/interdomain/tests/test_topology_abstractor.py b/src/interdomain/tests/test_topology_abstractor.py
new file mode 100644
index 000000000..e6243a236
--- /dev/null
+++ b/src/interdomain/tests/test_topology_abstractor.py
@@ -0,0 +1,105 @@
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, pytest, time
+from common.Constants import DEFAULT_CONTEXT_NAME
+from common.proto.context_pb2 import ContextId, Empty
+from common.proto.context_pb2 import ConfigActionEnum, Device, DeviceId
+from common.tools.descriptor.Loader import DescriptorLoader, check_descriptor_load_results, validate_empty_scenario
+from common.tools.grpc.Tools import grpc_message_to_json_string
+from common.tools.object_factory.Context import json_context_id
+from context.client.ContextClient import ContextClient
+from device.client.DeviceClient import DeviceClient
+from interdomain.service.topology_abstractor.TopologyAbstractor import TopologyAbstractor
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+@pytest.fixture(scope='session')
+def context_client():
+    _client = ContextClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def device_client():
+    _client = DeviceClient()
+    yield _client
+    _client.close()
+
+@pytest.fixture(scope='session')
+def topology_abstractor():
+    _topology_abstractor = TopologyAbstractor()
+    _topology_abstractor.start()
+    yield _topology_abstractor
+    _topology_abstractor.stop()
+    _topology_abstractor.join()
+
+def test_pre_cleanup_scenario(
+    context_client : ContextClient,   # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,     # pylint: disable=redefined-outer-name
+) -> None:
+    for link_id in context_client.ListLinkIds(Empty()).link_ids: context_client.RemoveLink(link_id)
+    for device_id in context_client.ListDeviceIds(Empty()).device_ids: device_client.DeleteDevice(device_id)
+
+    contexts = context_client.ListContexts(Empty())
+    for context in contexts.contexts:
+        assert len(context.slice_ids) == 0, 'Found Slices: {:s}'.format(grpc_message_to_json_string(context))
+        assert len(context.service_ids) == 0, 'Found Services: {:s}'.format(grpc_message_to_json_string(context))
+        for topology_id in context.topology_ids: context_client.RemoveTopology(topology_id)
+        context_client.RemoveContext(context.context_id)
+
+DESCRIPTOR_FILE = 'oeccpsc22/descriptors/domain1.json'
+#DESCRIPTOR_FILE = 'oeccpsc22/descriptors/domain2.json'
+
+def test_interdomain_topology_abstractor(
+    context_client : ContextClient,             # pylint: disable=redefined-outer-name
+    device_client : DeviceClient,               # pylint: disable=redefined-outer-name
+    topology_abstractor : TopologyAbstractor,   # pylint: disable=redefined-outer-name
+) -> None:
+    #validate_empty_scenario(context_client)
+
+    time.sleep(3)
+
+    descriptor_loader = DescriptorLoader(
+        descriptors_file=DESCRIPTOR_FILE, context_client=context_client, device_client=device_client)
+    results = descriptor_loader.process()
+    check_descriptor_load_results(results, descriptor_loader)
+    #descriptor_loader.validate()
+
+    time.sleep(3)
+
+    LOGGER.warning('real_to_abstract_device_uuid={:s}'.format(str(topology_abstractor.real_to_abstract_device_uuid)))
+    LOGGER.warning('real_to_abstract_link_uuid={:s}'.format(str(topology_abstractor.real_to_abstract_link_uuid)))
+
+    LOGGER.warning('abstract_device_to_topology_id={:s}'.format(str(topology_abstractor.abstract_device_to_topology_id)))
+    LOGGER.warning('abstract_link_to_topology_id={:s}'.format(str(topology_abstractor.abstract_link_to_topology_id)))
+
+    LOGGER.warning('abstract_devices={:s}'.format(str({
+        k:v.to_json()
+        for k,v in topology_abstractor.abstract_devices.items()
+    })))
+    LOGGER.warning('abstract_links={:s}'.format(str({
+        k:v.to_json()
+        for k,v in topology_abstractor.abstract_links.items()
+    })))
+
+    raise Exception()
+
+
+#def test_post_cleanup_scenario(
+#    context_client : ContextClient,   # pylint: disable=redefined-outer-name
+#    device_client : DeviceClient,     # pylint: disable=redefined-outer-name
+#) -> None:
+#    test_pre_cleanup_scenario(context_client, device_client)
--
GitLab
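
Both test modules introduced above end with a bare raise Exception(): they are exploratory probes that fail on purpose so that pytest prints the captured WARNING logs, and the rename of test_unitary.py to old_tests.py keeps the previous module out of pytest's default test_*.py collection. A minimal sketch of a manual run, assuming a deployed domain-1 instance and that the tfs_runtime_env_vars_dom1.sh file referenced below exists in the working directory; the trailing || true is only added here to absorb the intentionally non-zero exit code:

    source tfs_runtime_env_vars_dom1.sh
    PYTHONPATH=./src pytest --log-level=INFO --verbose \
        src/interdomain/tests/test_compute_domains.py || true

The next patch adds wrapper scripts for exactly these invocations.
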
From 6f4ff684605f6a67e0847ed37e6630500776e564 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Thu, 29 Jun 2023 14:42:53 +0000
Subject: [PATCH 22/23] Scripts:

- Added scripts to manually launch interdomain unitary tests
---
 ...tests_locally-interdomain-compute-domains.sh | 17 +++++++++++++++++
 ...s_locally-interdomain-topology-abstractor.sh | 17 +++++++++++++++++
 2 files changed, 34 insertions(+)
 create mode 100755 scripts/run_tests_locally-interdomain-compute-domains.sh
 create mode 100755 scripts/run_tests_locally-interdomain-topology-abstractor.sh

diff --git a/scripts/run_tests_locally-interdomain-compute-domains.sh b/scripts/run_tests_locally-interdomain-compute-domains.sh
new file mode 100755
index 000000000..09fa4ce7e
--- /dev/null
+++ b/scripts/run_tests_locally-interdomain-compute-domains.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars_dom1.sh
+PYTHONPATH=./src pytest --log-level=INFO --verbose src/interdomain/tests/test_compute_domains.py
diff --git a/scripts/run_tests_locally-interdomain-topology-abstractor.sh b/scripts/run_tests_locally-interdomain-topology-abstractor.sh
new file mode 100755
index 000000000..1e1dc1767
--- /dev/null
+++ b/scripts/run_tests_locally-interdomain-topology-abstractor.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# Copyright 2022-2023 ETSI TeraFlowSDN - TFS OSG (https://tfs.etsi.org/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+source tfs_runtime_env_vars_dom2.sh
+PYTHONPATH=./src pytest --log-level=INFO --verbose src/interdomain/tests/test_topology_abstractor.py
--
GitLab

From 2469bd2aafea0010cd04b0835e9c6f28c350a910 Mon Sep 17 00:00:00 2001
From: gifrerenom
Date: Thu, 29 Jun 2023 15:50:46 +0000
Subject: [PATCH 23/23] OECC/PSC'22 - Tests:

- Added script to expose interdomain grpc port on the node
---
 src/tests/oeccpsc22/expose_interdomain_dom2.sh | 8 ++++++++
 1 file changed, 8 insertions(+)
 create mode 100755 src/tests/oeccpsc22/expose_interdomain_dom2.sh

diff --git a/src/tests/oeccpsc22/expose_interdomain_dom2.sh b/src/tests/oeccpsc22/expose_interdomain_dom2.sh
new file mode 100755
index 000000000..7bdddae0b
--- /dev/null
+++ b/src/tests/oeccpsc22/expose_interdomain_dom2.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+PATCH='{"data": {"10010": "tfs-dom2/interdomainservice:10010"}}'
+kubectl patch configmap nginx-ingress-tcp-microk8s-conf-dom2 --namespace ingress --patch "${PATCH}"
+
+CONTAINER='{"name": "nginx-ingress-microk8s", "ports": [{"containerPort": 10010, "hostPort": 10010, "protocol": "TCP"}]}'
+PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+kubectl patch daemonset nginx-ingress-microk8s-controller-dom2 --namespace ingress --patch "${PATCH}"
--
GitLab
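
The expose_interdomain_dom2.sh script above publishes the Domain 2 interdomain gRPC endpoint on TCP port 10010 through the MicroK8s NGINX ingress: one entry in the TCP-services ConfigMap plus a matching hostPort on the ingress controller DaemonSet. A minimal verification sketch, assuming kubectl access to the dom2 cluster; <node-ip> is a placeholder for the address of the MicroK8s node:

    kubectl get configmap nginx-ingress-tcp-microk8s-conf-dom2 --namespace ingress -o yaml | grep 10010
    kubectl get daemonset nginx-ingress-microk8s-controller-dom2 --namespace ingress -o yaml | grep hostPort
    nc -zv <node-ip> 10010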