Skip to content
Snippets Groups Projects
Commit 1a85a101 authored by Lluis Gifre Renom's avatar Lluis Gifre Renom
Browse files

PathComp component:

Common:
- temporarily added backend to pathcomp service for debug purposes

Proto:
- Added KDisjointPath Algorithm to pathcomp proto

Frontend:
- refactored Servicer to cope with multiple algorithms
- added implementation of ShortestPath algorithm
- added implementation of KShortestPath algorithm
- added implementation of KDisjointPath algorithm (partial)
- moved PathComp servicer tools to algorithms subfolder
- added new unit test scenario (DCs with CellSiteGWs and Transport Network) to validate the KDisjointPath algorithm
parent 9af0d9f3
No related branches found
No related tags found
1 merge request!54Release 2.0.0
Showing
with 667 additions and 142 deletions
...@@ -93,3 +93,7 @@ spec: ...@@ -93,3 +93,7 @@ spec:
protocol: TCP protocol: TCP
port: 10020 port: 10020
targetPort: 10020 targetPort: 10020
- name: http
protocol: TCP
port: 8081
targetPort: 8081
...@@ -28,11 +28,16 @@ message Algorithm_KShortestPath { ...@@ -28,11 +28,16 @@ message Algorithm_KShortestPath {
uint32 k_return = 2; uint32 k_return = 2;
} }
message Algorithm_KDisjointPath {
uint32 num_disjoint = 1;
}
message PathCompRequest { message PathCompRequest {
repeated context.Service services = 1; repeated context.Service services = 1;
oneof algorithm { oneof algorithm {
Algorithm_ShortestPath shortest_path = 10; Algorithm_ShortestPath shortest_path = 10;
Algorithm_KShortestPath k_shortest_path = 11; Algorithm_KShortestPath k_shortest_path = 11;
Algorithm_KDisjointPath k_disjoint_path = 12;
} }
} }
......
...@@ -12,17 +12,14 @@ ...@@ -12,17 +12,14 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import grpc, json, logging, requests, uuid import grpc, logging
from typing import Dict, Tuple from common.proto.context_pb2 import Empty
from common.proto.context_pb2 import Empty, EndPointId, Service
from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer from common.proto.pathcomp_pb2_grpc import PathCompServiceServicer
from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
from common.tools.grpc.Tools import grpc_message_to_json_string from common.tools.grpc.Tools import grpc_message_to_json_string
from context.client.ContextClient import ContextClient from context.client.ContextClient import ContextClient
from pathcomp.frontend.Config import BACKEND_URL from pathcomp.frontend.service.algorithms.Factory import get_algorithm
from pathcomp.frontend.service.tools.ComposeRequest import compose_device, compose_link, compose_service
#from pathcomp.frontend.service.tools.Constants import CapacityUnit
LOGGER = logging.getLogger(__name__) LOGGER = logging.getLogger(__name__)
...@@ -39,123 +36,26 @@ class PathCompServiceServicerImpl(PathCompServiceServicer): ...@@ -39,123 +36,26 @@ class PathCompServiceServicerImpl(PathCompServiceServicer):
def Compute(self, request : PathCompRequest, context : grpc.ServicerContext) -> PathCompReply: def Compute(self, request : PathCompRequest, context : grpc.ServicerContext) -> PathCompReply:
LOGGER.info('[Compute] begin ; request = {:s}'.format(grpc_message_to_json_string(request))) LOGGER.info('[Compute] begin ; request = {:s}'.format(grpc_message_to_json_string(request)))
algorithm = request.WhichOneof('algorithm')
if algorithm == 'shortest_path':
# no attributes
pass
elif algorithm == 'k_shortest_path':
k_inspection = request.k_shortest_path.k_inspection
k_return = request.k_shortest_path.k_return
else:
raise NotImplementedError('Unsupported Algorithm: {:s}'.format(str(algorithm)))
context_client = ContextClient() context_client = ContextClient()
algorithm = {'id': 'KSP', 'sync': False, 'k_paths': k_return} # TODO: add filtering of devices and links
service_list = [ # TODO: add contexts, topologies, and membership of devices/links in topologies
compose_service(grpc_service, algorithm) algorithm = get_algorithm(request)
for grpc_service in request.services algorithm.add_devices(context_client.ListDevices(Empty()))
] algorithm.add_links(context_client.ListLinks(Empty()))
get_service_key = lambda service_id: (service_id['contextId'], service_id['service_uuid']) algorithm.add_service_requests(request)
service_dict : Dict[Tuple[str, str], Tuple[Dict, Service]] = {
get_service_key(json_service['serviceId']): (json_service, grpc_service) #LOGGER.debug('device_list = {:s}' .format(str(algorithm.device_list )))
for json_service,grpc_service in zip(service_list, request.services) #LOGGER.debug('endpoint_dict = {:s}'.format(str(algorithm.endpoint_dict)))
} #LOGGER.debug('link_list = {:s}' .format(str(algorithm.link_list )))
#LOGGER.info('service_dict = {:s}'.format(str(service_dict))) #LOGGER.debug('service_list = {:s}' .format(str(algorithm.service_list )))
#LOGGER.debug('service_dict = {:s}' .format(str(algorithm.service_dict )))
# TODO: consider filtering resources
#import time
#grpc_contexts = context_client.ListContexts(Empty()) #ts = time.time()
#for grpc_context in grpc_contexts.contexts: #algorithm.execute('request-{:f}.json'.format(ts), 'reply-{:f}.json'.format(ts))
# # TODO: add context to request algorithm.execute()
# grpc_topologies = context_client.ListTopologies(grpc_context.context_id)
# for grpc_topology in grpc_topologies.topologies: #pylint: disable=unused-variable reply = algorithm.get_reply()
# # TODO: add topology to request
# pass
grpc_devices = context_client.ListDevices(Empty())
device_list = [
compose_device(grpc_device)
for grpc_device in grpc_devices.devices
]
endpoint_dict : Dict[str, Dict[str, Tuple[Dict, EndPointId]]] = {
json_device['device_Id']: {
json_endpoint['endpoint_id']['endpoint_uuid']: (json_endpoint['endpoint_id'], grpc_endpoint.endpoint_id)
for json_endpoint,grpc_endpoint in zip(json_device['device_endpoints'], grpc_device.device_endpoints)
}
for json_device,grpc_device in zip(device_list, grpc_devices.devices)
}
#LOGGER.info('endpoint_dict = {:s}'.format(str(endpoint_dict)))
grpc_links = context_client.ListLinks(Empty())
link_list = [
compose_link(grpc_link)
for grpc_link in grpc_links.links
]
request = {
'serviceList': service_list,
'deviceList' : device_list,
'linkList' : link_list,
}
#with open('pc-req.json', 'w', encoding='UTF-8') as f:
# f.write(json.dumps(request, sort_keys=True, indent=4))
reply = requests.post(BACKEND_URL, json=request)
if reply.status_code not in {requests.codes.ok}:
raise Exception('Backend error({:s}) for request({:s})'.format(
str(reply.content.decode('UTF-8')), json.dumps(request, sort_keys=True)))
LOGGER.info('status_code={:s} reply={:s}'.format(
str(reply.status_code), str(reply.content.decode('UTF-8'))))
json_reply = reply.json()
response_list = json_reply.get('response-list', [])
reply = PathCompReply()
for response in response_list:
service_key = get_service_key(response['serviceId'])
tuple_service = service_dict.get(service_key)
if tuple_service is None: raise Exception('ServiceKey({:s}) not found'.format(str(service_key)))
json_service, grpc_service = tuple_service
# TODO: implement support for multi-point services
service_endpoint_ids = grpc_service.service_endpoint_ids
if len(service_endpoint_ids) != 2: raise NotImplementedError('Service must have 2 endpoints')
service = reply.services.add()
service.CopyFrom(grpc_service)
connection = reply.connections.add()
connection.connection_id.connection_uuid.uuid = str(uuid.uuid4())
connection.service_id.CopyFrom(service.service_id)
no_path_issue = response.get('noPath', {}).get('issue')
if no_path_issue is not None:
# no path found: leave connection with no endpoints
# no_path_issue == 1 => no path due to a constraint
continue
service_paths = response['path']
for service_path in service_paths:
# ... "path-capacity": {"total-size": {"value": 200, "unit": 0}},
# ... "path-latency": {"fixed-latency-characteristic": "10.000000"},
# ... "path-cost": {"cost-name": "", "cost-value": "5.000000", "cost-algorithm": "0.000000"},
#path_capacity = service_path['path-capacity']['total-size']
#path_capacity_value = path_capacity['value']
#path_capacity_unit = CapacityUnit(path_capacity['unit'])
#path_latency = service_path['path-latency']['fixed-latency-characteristic']
#path_cost = service_path['path-cost']
#path_cost_name = path_cost['cost-name']
#path_cost_value = path_cost['cost-value']
#path_cost_algorithm = path_cost['cost-algorithm']
path_endpoints = service_path['devices']
for endpoint in path_endpoints:
device_uuid = endpoint['device_id']
endpoint_uuid = endpoint['endpoint_uuid']
endpoint_id = connection.path_hops_endpoint_ids.add()
endpoint_id.CopyFrom(endpoint_dict[device_uuid][endpoint_uuid][1])
LOGGER.info('[Compute] end ; reply = {:s}'.format(grpc_message_to_json_string(reply))) LOGGER.info('[Compute] end ; reply = {:s}'.format(grpc_message_to_json_string(reply)))
return reply return reply
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._Algorithm import _Algorithm
from .KDisjointPathAlgorithm import KDisjointPathAlgorithm
from .KShortestPathAlgorithm import KShortestPathAlgorithm
from .ShortestPathAlgorithm import ShortestPathAlgorithm
# Registry mapping the PathCompRequest 'algorithm' oneof field name to the
# class implementing that algorithm.
ALGORITHMS = {
    'shortest_path'  : ShortestPathAlgorithm,
    'k_shortest_path': KShortestPathAlgorithm,
    'k_disjoint_path': KDisjointPathAlgorithm,
}

def get_algorithm(request) -> _Algorithm:
    """Instantiate the algorithm selected in a PathCompRequest.

    Inspects the request's 'algorithm' oneof to discover which algorithm was
    selected, and builds the matching class with the per-algorithm settings
    message carried in the request.

    Raises Exception when the oneof is unset or names an unsupported algorithm.
    """
    algorithm_name = request.WhichOneof('algorithm')    # None when the oneof is unset
    algorithm_class = ALGORITHMS.get(algorithm_name)
    if algorithm_class is None:
        # include the supported names to ease troubleshooting of bad requests
        raise Exception('Algorithm({:s}) not supported; supported: {:s}'.format(
            str(algorithm_name), str(sorted(ALGORITHMS.keys()))))
    algorithm_settings = getattr(request, algorithm_name)
    algorithm_instance = algorithm_class(algorithm_settings)
    return algorithm_instance
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import Optional
from common.proto.pathcomp_pb2 import Algorithm_KDisjointPath, Algorithm_KShortestPath, PathCompReply
from ._Algorithm import _Algorithm
from .KShortestPathAlgorithm import KShortestPathAlgorithm
class KDisjointPathAlgorithm(_Algorithm):
    """Computes up to num_disjoint link-disjoint paths per service (partial implementation).

    Strategy: repeatedly run an inner K-Shortest-Path computation (k_return=1);
    after each round, remove the links used by the paths found from the inner
    algorithm's link list, so subsequent rounds are forced onto disjoint links.
    The composition of the final reply is still pending (see TODOs below);
    execute() currently leaves json_reply empty.
    """

    def __init__(self, algorithm : Algorithm_KDisjointPath, class_name=__name__) -> None:
        # 'KDP' is the algorithm identifier; sync_paths is False here — the
        # inner KSP instance created in execute() enables it explicitly.
        super().__init__('KDP', False, class_name=class_name)
        # number of mutually link-disjoint paths requested per service
        self.num_disjoint = algorithm.num_disjoint

    def execute(self, dump_request_filename: Optional[str] = None, dump_reply_filename: Optional[str] = None) -> None:
        # NOTE(review): dump_request_filename/dump_reply_filename are accepted for
        # interface compatibility with _Algorithm.execute but are not used here;
        # the inner KSP runs dump to hard-coded 'ksp-<i>-*.json/txt' files instead.

        # Inner single-path KSP computation, sharing this algorithm's topology/services.
        algorithm = KShortestPathAlgorithm(Algorithm_KShortestPath(k_inspection=0, k_return=1))
        algorithm.sync_paths = True
        algorithm.device_list = self.device_list
        algorithm.device_dict = self.device_dict
        algorithm.endpoint_dict = self.endpoint_dict
        algorithm.link_list = self.link_list
        algorithm.link_dict = self.link_dict
        algorithm.endpoint_to_link_dict = self.endpoint_to_link_dict
        algorithm.service_list = self.service_list
        algorithm.service_dict = self.service_dict

        # (context_uuid, service_uuid) -> list of paths; each path is a list of
        # JSON links, or None when no path was found in that round.
        disjoint_paths = dict()
        for num_path in range(self.num_disjoint):
            algorithm.execute('ksp-{:d}-request.json'.format(num_path), 'ksp-{:d}-reply.txt'.format(num_path))
            response_list = algorithm.json_reply.get('response-list', [])
            for response in response_list:
                service_id = response['serviceId']
                service_key = (service_id['contextId'], service_id['service_uuid'])
                disjoint_paths_service = disjoint_paths.setdefault(service_key, list())

                no_path_issue = response.get('noPath', {}).get('issue')
                if no_path_issue is not None:
                    # no path found this round; record the gap and keep going
                    disjoint_paths_service.append(None)
                    continue

                # Map the endpoint hops of the (single) returned path back to links.
                path_endpoints = response['path'][0]['devices']
                path_links = list()
                path_link_ids = set()
                for endpoint in path_endpoints:
                    device_uuid = endpoint['device_id']
                    endpoint_uuid = endpoint['endpoint_uuid']
                    item = algorithm.endpoint_to_link_dict.get((device_uuid, endpoint_uuid))
                    if item is None:
                        MSG = 'Link for Endpoint({:s}, {:s}) not found'
                        self.logger.warning(MSG.format(device_uuid, endpoint_uuid))
                        continue
                    json_link,_ = item
                    json_link_id = json_link['link_Id']
                    # both endpoints of a link map to the same link: de-duplicate consecutive hits
                    if len(path_links) == 0 or path_links[-1]['link_Id'] != json_link_id:
                        path_links.append(json_link)
                        path_link_ids.add(json_link_id)
                self.logger.info('path_links = {:s}'.format(str(path_links)))
                disjoint_paths_service.append(path_links)

                # Prune the used links so the next round finds a disjoint path.
                new_link_list = list(filter(lambda l: l['link_Id'] not in path_link_ids, algorithm.link_list))
                self.logger.info('algorithm.link_list = {:s}'.format(str(algorithm.link_list)))
                self.logger.info('new_link_list = {:s}'.format(str(new_link_list)))
                algorithm.link_list = new_link_list

                # TODO: find used links and remove them from algorithm.link_list
                # TODO: compose disjoint path found

        self.logger.info('disjoint_paths = {:s}'.format(str(disjoint_paths)))
        # Reply composition not implemented yet: get_reply() will see no responses.
        self.json_reply = {}
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.proto.pathcomp_pb2 import Algorithm_KShortestPath
from ._Algorithm import _Algorithm
class KShortestPathAlgorithm(_Algorithm):
    """K-Shortest-Path algorithm: asks the backend for up to k_return candidate paths."""

    def __init__(self, algorithm : Algorithm_KShortestPath, class_name=__name__) -> None:
        super().__init__('KSP', False, class_name=class_name)
        self.k_inspection = algorithm.k_inspection
        self.k_return = algorithm.k_return

    def add_service_requests(self, requested_services) -> None:
        # Let the base class compose the JSON services, then stamp the
        # algorithm-specific fields onto every composed request.
        super().add_service_requests(requested_services)
        algorithm_fields = {
            'algId'    : self.algorithm_id,
            'syncPaths': self.sync_paths,
            'kPaths'   : self.k_return,
        }
        for json_service in self.service_list:
            json_service.update(algorithm_fields)
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.proto.pathcomp_pb2 import Algorithm_ShortestPath
from ._Algorithm import _Algorithm
class ShortestPathAlgorithm(_Algorithm):
    """Plain shortest-path algorithm: a single path is requested per service."""

    def __init__(self, algorithm : Algorithm_ShortestPath, class_name=__name__) -> None:
        super().__init__('SP', False, class_name=class_name)
        self.k_paths = 1

    def add_service_requests(self, requested_services) -> None:
        # Base class composes the JSON services; add the algorithm-specific fields.
        super().add_service_requests(requested_services)
        algorithm_fields = {
            'algId'    : self.algorithm_id,
            'syncPaths': self.sync_paths,
            'kPaths'   : self.k_paths,
        }
        for json_service in self.service_list:
            json_service.update(algorithm_fields)
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, logging, requests, uuid
from typing import Dict, List, Optional, Tuple
from common.proto.context_pb2 import Device, DeviceList, EndPointId, Link, LinkList, Service
from common.proto.pathcomp_pb2 import PathCompReply, PathCompRequest
from common.tools.grpc.Tools import grpc_message_to_json_string
from pathcomp.frontend.Config import BACKEND_URL
from .tools.ComposeRequest import compose_device, compose_link, compose_service
class _Algorithm:
    """Base class for PathComp front-end algorithms.

    Lifecycle: add_devices()/add_links()/add_service_requests() compose the
    JSON objects to be sent to the backend (and retain the original gRPC
    objects for building the reply); execute() POSTs the composed request to
    the PathComp backend; get_reply() converts the backend's JSON response
    into a PathCompReply.
    """

    def __init__(self, algorithm_id : str, sync_paths : bool, class_name=__name__) -> None:
        # algorithm_id: algorithm to be executed
        # sync_paths: if multiple services are included in the request, tunes how to prevent contention. If true,
        #   services are computed one after the other and resources assigned to service i, are considered as
        #   used when computing services i+1..n; otherwise, resources are never marked as used during the
        #   path computation.
        self.logger = logging.getLogger(class_name)
        self.algorithm_id = algorithm_id
        self.sync_paths = sync_paths

        # JSON devices, plus indexes:
        #   device_dict  : device_uuid -> (json_device, grpc_device)
        #   endpoint_dict: device_uuid -> endpoint_uuid -> (json_endpoint_id, grpc_endpoint_id)
        self.device_list : List[Device] = list()
        self.device_dict : Dict[str, Tuple[Dict, Device]] = dict()
        self.endpoint_dict : Dict[str, Dict[str, Tuple[Dict, EndPointId]]] = dict()

        # JSON links, plus indexes:
        #   link_dict            : link_uuid -> (json_link, grpc_link)
        #   endpoint_to_link_dict: (device_uuid, endpoint_uuid) -> (json_link, grpc_link)
        self.link_list : List[Link] = list()
        self.link_dict : Dict[str, Tuple[Dict, Link]] = dict()
        self.endpoint_to_link_dict : Dict[Tuple[str, str], Tuple[Dict, Link]] = dict()

        # JSON service requests, plus index:
        #   service_dict: (context_uuid, service_uuid) -> (json_service, grpc_service)
        self.service_list : List[Service] = list()
        self.service_dict : Dict[Tuple[str, str], Tuple[Dict, Service]] = dict()

    def add_devices(self, grpc_devices : DeviceList) -> None:
        """Register the devices (and their endpoints) to be considered."""
        for grpc_device in grpc_devices.devices:
            json_device = compose_device(grpc_device)
            self.device_list.append(json_device)

            device_uuid = json_device['device_Id']
            self.device_dict[device_uuid] = (json_device, grpc_device)

            # index endpoints by uuid; the zip relies on compose_device preserving endpoint order
            device_endpoint_dict : Dict[str, Tuple[Dict, EndPointId]] = dict()
            for json_endpoint,grpc_endpoint in zip(json_device['device_endpoints'], grpc_device.device_endpoints):
                endpoint_uuid = json_endpoint['endpoint_id']['endpoint_uuid']
                endpoint_tuple = (json_endpoint['endpoint_id'], grpc_endpoint.endpoint_id)
                device_endpoint_dict[endpoint_uuid] = endpoint_tuple
            self.endpoint_dict[device_uuid] = device_endpoint_dict

    def add_links(self, grpc_links : LinkList) -> None:
        """Register the links to be considered."""
        for grpc_link in grpc_links.links:
            json_link = compose_link(grpc_link)
            self.link_list.append(json_link)

            link_uuid = json_link['link_Id']
            self.link_dict[link_uuid] = (json_link, grpc_link)

            # remember, for each endpoint of the link, which link it belongs to
            for link_endpoint_id in json_link['link_endpoint_ids']:
                link_endpoint_id = link_endpoint_id['endpoint_id']
                device_uuid = link_endpoint_id['device_id']
                endpoint_uuid = link_endpoint_id['endpoint_uuid']
                endpoint_key = (device_uuid, endpoint_uuid)
                link_tuple = (json_link, grpc_link)
                self.endpoint_to_link_dict[endpoint_key] = link_tuple

    def add_service_requests(self, request : PathCompRequest) -> None:
        """Register the services whose paths must be computed."""
        for grpc_service in request.services:
            json_service = compose_service(grpc_service)
            self.service_list.append(json_service)
            service_id = json_service['serviceId']
            service_key = (service_id['contextId'], service_id['service_uuid'])
            service_tuple = (json_service, grpc_service)
            self.service_dict[service_key] = service_tuple

    def execute(self, dump_request_filename : Optional[str] = None, dump_reply_filename : Optional[str] = None) -> None:
        """POST the composed request to the backend and store the parsed reply.

        dump_request_filename / dump_reply_filename: optional file paths where
        the request/reply are written for debugging.
        Sets self.status_code, self.raw_reply and self.json_reply; raises
        Exception when the backend returns a non-OK HTTP status.
        """
        request = {'serviceList': self.service_list, 'deviceList': self.device_list, 'linkList': self.link_list}
        if dump_request_filename is not None:
            with open(dump_request_filename, 'w', encoding='UTF-8') as f:
                f.write(json.dumps(request, sort_keys=True, indent=4))
        reply = requests.post(BACKEND_URL, json=request)
        self.status_code = reply.status_code
        self.raw_reply = reply.content.decode('UTF-8')
        if dump_reply_filename is not None:
            with open(dump_reply_filename, 'w', encoding='UTF-8') as f:
                f.write('status_code={:s} reply={:s}'.format(str(self.status_code), str(self.raw_reply)))
        self.logger.info('status_code={:s} reply={:s}'.format(str(reply.status_code), str(self.raw_reply)))
        if reply.status_code not in {requests.codes.ok}:
            raise Exception('Backend error({:s}) for request({:s})'.format(
                str(self.raw_reply), json.dumps(request, sort_keys=True)))
        self.json_reply = reply.json()

    def get_reply(self) -> PathCompReply:
        """Convert the backend JSON response (self.json_reply) into a PathCompReply.

        For each response, adds the original gRPC Service plus one Connection
        whose path hops are the gRPC EndPointIds resolved via endpoint_dict.
        Raises Exception for unknown service keys; NotImplementedError for
        services without exactly 2 endpoints.
        """
        response_list = self.json_reply.get('response-list', [])
        reply = PathCompReply()
        for response in response_list:
            service_id = response['serviceId']
            service_key = (service_id['contextId'], service_id['service_uuid'])
            tuple_service = self.service_dict.get(service_key)
            if tuple_service is None: raise Exception('ServiceKey({:s}) not found'.format(str(service_key)))
            json_service, grpc_service = tuple_service

            # TODO: implement support for multi-point services
            service_endpoint_ids = grpc_service.service_endpoint_ids
            if len(service_endpoint_ids) != 2: raise NotImplementedError('Service must have 2 endpoints')

            service = reply.services.add()
            service.CopyFrom(grpc_service)

            connection = reply.connections.add()
            connection.connection_id.connection_uuid.uuid = str(uuid.uuid4())
            connection.service_id.CopyFrom(service.service_id)

            no_path_issue = response.get('noPath', {}).get('issue')
            if no_path_issue is not None:
                # no path found: leave connection with no endpoints
                # no_path_issue == 1 => no path due to a constraint
                continue

            service_paths = response['path']
            for service_path in service_paths:
                # ... "path-capacity": {"total-size": {"value": 200, "unit": 0}},
                # ... "path-latency": {"fixed-latency-characteristic": "10.000000"},
                # ... "path-cost": {"cost-name": "", "cost-value": "5.000000", "cost-algorithm": "0.000000"},
                #path_capacity = service_path['path-capacity']['total-size']
                #path_capacity_value = path_capacity['value']
                #path_capacity_unit = CapacityUnit(path_capacity['unit'])
                #path_latency = service_path['path-latency']['fixed-latency-characteristic']
                #path_cost = service_path['path-cost']
                #path_cost_name = path_cost['cost-name']
                #path_cost_value = path_cost['cost-value']
                #path_cost_algorithm = path_cost['cost-algorithm']
                path_endpoints = service_path['devices']
                for endpoint in path_endpoints:
                    device_uuid = endpoint['device_id']
                    endpoint_uuid = endpoint['endpoint_uuid']
                    endpoint_id = connection.path_hops_endpoint_ids.add()
                    # [1] selects the gRPC EndPointId of the (json, grpc) tuple
                    endpoint_id.CopyFrom(self.endpoint_dict[device_uuid][endpoint_uuid][1])
        return reply
...@@ -101,7 +101,7 @@ def compose_link(grpc_link : Link) -> Dict: ...@@ -101,7 +101,7 @@ def compose_link(grpc_link : Link) -> Dict:
'cost-characteristics': cost_characteristics, 'latency-characteristics': latency_characteristics, 'cost-characteristics': cost_characteristics, 'latency-characteristics': latency_characteristics,
} }
def compose_service(grpc_service : Service, algorithm : Dict) -> Dict: def compose_service(grpc_service : Service) -> Dict:
service_id = compose_service_id(grpc_service.service_id) service_id = compose_service_id(grpc_service.service_id)
service_type = grpc_service.service_type service_type = grpc_service.service_type
...@@ -115,23 +115,9 @@ def compose_service(grpc_service : Service, algorithm : Dict) -> Dict: ...@@ -115,23 +115,9 @@ def compose_service(grpc_service : Service, algorithm : Dict) -> Dict:
for service_constraint in grpc_service.service_constraints for service_constraint in grpc_service.service_constraints
] ]
# algorithm to be executed
algorithm_id = algorithm.get('id', 'SP')
# if multiple services included in the request, prevent contention
# If true, services are computed one after the other and resources
# assigned to service i, are considered as used by services i+1..n
sync_paths = algorithm.get('sync', False)
k_paths = algorithm.get('k_paths', 1)
return { return {
'serviceId': service_id, 'serviceId': service_id,
'serviceType': service_type, 'serviceType': service_type,
'service_endpoints_ids': endpoint_ids, 'service_endpoints_ids': endpoint_ids,
'service_constraints': constraints, 'service_constraints': constraints,
'algId': algorithm_id,
'syncPaths': sync_paths,
'kPaths': k_paths,
} }
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID
from common.tools.object_factory.Constraint import json_constraint
from common.tools.object_factory.Context import json_context, json_context_id
from common.tools.object_factory.Device import json_device_emulated_packet_router_disabled, json_device_id
from common.tools.object_factory.EndPoint import json_endpoints
from common.tools.object_factory.Link import get_link_uuid, json_link, json_link_id
from common.tools.object_factory.Service import get_service_uuid, json_service_l3nm_planned
from common.tools.object_factory.Topology import json_topology, json_topology_id
def compose_device(device_uuid, endpoint_uuids, topology_id=None):
    """Build an emulated packet-router device with copper endpoints.

    Returns a (device_id, endpoints, device) tuple of JSON objects.
    """
    device_id = json_device_id(device_uuid)
    endpoint_specs = [(ep_uuid, 'copper', []) for ep_uuid in endpoint_uuids]
    endpoints = json_endpoints(device_id, endpoint_specs, topology_id=topology_id)
    device = json_device_emulated_packet_router_disabled(device_uuid, endpoints=endpoints)
    return device_id, endpoints, device
def compose_link(endpoint_a, endpoint_z):
    """Build a JSON link between two endpoints; returns (link_id, link)."""
    endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
    link_uuid = get_link_uuid(endpoint_ids[0], endpoint_ids[1])
    return json_link_id(link_uuid), json_link(link_uuid, endpoint_ids)
def compose_service(endpoint_a, endpoint_z, constraints=None):
    """Build a planned L3NM JSON service between two endpoints.

    constraints: optional list of JSON constraints; defaults to no constraints.
    The default was changed from a mutable `[]` literal to None to avoid the
    shared mutable-default-argument pitfall; passing an explicit list behaves
    exactly as before.
    """
    if constraints is None: constraints = []
    service_uuid = get_service_uuid(endpoint_a['endpoint_id'], endpoint_z['endpoint_id'])
    endpoint_ids = [endpoint_a['endpoint_id'], endpoint_z['endpoint_id']]
    service = json_service_l3nm_planned(service_uuid, endpoint_ids=endpoint_ids, constraints=constraints)
    return service
# Test scenario: two DataCenters (DC1/DC2), each dual-homed to a CellSite with
# two gateways (CS1/CS2), which in turn are dual-homed to a 4-router Transport
# Network ring (TN-R1..R4) with cross diagonals — built to exercise the
# KDisjointPath algorithm (two link-disjoint DC1<->DC2 paths exist).

# ----- Context --------------------------------------------------------------------------------------------------------
CONTEXT_ID = json_context_id(DEFAULT_CONTEXT_UUID)
CONTEXT = json_context(DEFAULT_CONTEXT_UUID)

# ----- Domains --------------------------------------------------------------------------------------------------------
# Overall network topology
TOPO_ADMIN_UUID = DEFAULT_TOPOLOGY_UUID
TOPO_ADMIN_ID = json_topology_id(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)
TOPO_ADMIN = json_topology(TOPO_ADMIN_UUID, context_id=CONTEXT_ID)

# DataCenter #1 Network
TOPO_DC1_UUID = 'DC1'
TOPO_DC1_ID = json_topology_id(TOPO_DC1_UUID, context_id=CONTEXT_ID)
TOPO_DC1 = json_topology(TOPO_DC1_UUID, context_id=CONTEXT_ID)

# DataCenter #2 Network
TOPO_DC2_UUID = 'DC2'
TOPO_DC2_ID = json_topology_id(TOPO_DC2_UUID, context_id=CONTEXT_ID)
TOPO_DC2 = json_topology(TOPO_DC2_UUID, context_id=CONTEXT_ID)

# CellSite #1 Network
TOPO_CS1_UUID = 'CS1'
TOPO_CS1_ID = json_topology_id(TOPO_CS1_UUID, context_id=CONTEXT_ID)
TOPO_CS1 = json_topology(TOPO_CS1_UUID, context_id=CONTEXT_ID)

# CellSite #2 Network
TOPO_CS2_UUID = 'CS2'
TOPO_CS2_ID = json_topology_id(TOPO_CS2_UUID, context_id=CONTEXT_ID)
TOPO_CS2 = json_topology(TOPO_CS2_UUID, context_id=CONTEXT_ID)

# Transport Network Network
TOPO_TN_UUID = 'TN'
TOPO_TN_ID = json_topology_id(TOPO_TN_UUID, context_id=CONTEXT_ID)
TOPO_TN = json_topology(TOPO_TN_UUID, context_id=CONTEXT_ID)

# ----- Devices --------------------------------------------------------------------------------------------------------
# DataCenters: 'eth1'/'eth2' uplinks to the CellSite gateways, 'int' is the service endpoint
DEV_DC1GW_ID, DEV_DC1GW_EPS, DEV_DC1GW = compose_device('DC1-GW', ['eth1', 'eth2', 'int'], topology_id=TOPO_DC1_ID)
DEV_DC2GW_ID, DEV_DC2GW_EPS, DEV_DC2GW = compose_device('DC2-GW', ['eth1', 'eth2', 'int'], topology_id=TOPO_DC2_ID)

# CellSites: '1000' faces the DC gateway, '100'/'200' face the Transport Network routers
DEV_CS1GW1_ID, DEV_CS1GW1_EPS, DEV_CS1GW1 = compose_device('CS1-GW1', ['1000', '100', '200'], topology_id=TOPO_CS1_ID)
DEV_CS1GW2_ID, DEV_CS1GW2_EPS, DEV_CS1GW2 = compose_device('CS1-GW2', ['1000', '100', '200'], topology_id=TOPO_CS1_ID)
DEV_CS2GW1_ID, DEV_CS2GW1_EPS, DEV_CS2GW1 = compose_device('CS2-GW1', ['1000', '100', '200'], topology_id=TOPO_CS2_ID)
DEV_CS2GW2_ID, DEV_CS2GW2_EPS, DEV_CS2GW2 = compose_device('CS2-GW2', ['1000', '100', '200'], topology_id=TOPO_CS2_ID)

# Transport Network: '100'/'200' face the CellSite gateways, '1'/'2'/'3' are intra-TN ports
DEV_TNR1_ID, DEV_TNR1_EPS, DEV_TNR1 = compose_device('TN-R1', ['100', '200', '1', '2', '3'], topology_id=TOPO_TN_ID)
DEV_TNR2_ID, DEV_TNR2_EPS, DEV_TNR2 = compose_device('TN-R2', ['100', '200', '1', '2', '3'], topology_id=TOPO_TN_ID)
DEV_TNR3_ID, DEV_TNR3_EPS, DEV_TNR3 = compose_device('TN-R3', ['100', '200', '1', '2', '3'], topology_id=TOPO_TN_ID)
DEV_TNR4_ID, DEV_TNR4_EPS, DEV_TNR4 = compose_device('TN-R4', ['100', '200', '1', '2', '3'], topology_id=TOPO_TN_ID)

# ----- Links ----------------------------------------------------------------------------------------------------------
# InterDomain DC-CSGW: each DC gateway is dual-homed to its CellSite's two gateways
LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW1 = compose_link(DEV_DC1GW_EPS[0], DEV_CS1GW1_EPS[0])
LINK_DC1GW_CS1GW2_ID, LINK_DC1GW_CS1GW2 = compose_link(DEV_DC1GW_EPS[1], DEV_CS1GW2_EPS[0])
LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW1 = compose_link(DEV_DC2GW_EPS[0], DEV_CS2GW1_EPS[0])
LINK_DC2GW_CS2GW2_ID, LINK_DC2GW_CS2GW2 = compose_link(DEV_DC2GW_EPS[1], DEV_CS2GW2_EPS[0])

# InterDomain CSGW-TN: each CellSite gateway is dual-homed to two TN routers
LINK_CS1GW1_TNR1_ID, LINK_CS1GW1_TNR1 = compose_link(DEV_CS1GW1_EPS[1], DEV_TNR1_EPS[0])
LINK_CS1GW2_TNR2_ID, LINK_CS1GW2_TNR2 = compose_link(DEV_CS1GW2_EPS[1], DEV_TNR2_EPS[0])
LINK_CS1GW1_TNR2_ID, LINK_CS1GW1_TNR2 = compose_link(DEV_CS1GW1_EPS[2], DEV_TNR2_EPS[1])
LINK_CS1GW2_TNR1_ID, LINK_CS1GW2_TNR1 = compose_link(DEV_CS1GW2_EPS[2], DEV_TNR1_EPS[1])
LINK_CS2GW1_TNR3_ID, LINK_CS2GW1_TNR3 = compose_link(DEV_CS2GW1_EPS[1], DEV_TNR3_EPS[0])
LINK_CS2GW2_TNR4_ID, LINK_CS2GW2_TNR4 = compose_link(DEV_CS2GW2_EPS[1], DEV_TNR4_EPS[0])
LINK_CS2GW1_TNR4_ID, LINK_CS2GW1_TNR4 = compose_link(DEV_CS2GW1_EPS[2], DEV_TNR4_EPS[1])
LINK_CS2GW2_TNR3_ID, LINK_CS2GW2_TNR3 = compose_link(DEV_CS2GW2_EPS[2], DEV_TNR3_EPS[1])

# IntraDomain TN: ring R1-R2-R3-R4-R1 plus the R1-R3 and R2-R4 diagonals
LINK_TNR1_TNR2_ID, LINK_TNR1_TNR2 = compose_link(DEV_TNR1_EPS[2], DEV_TNR2_EPS[3])
LINK_TNR2_TNR3_ID, LINK_TNR2_TNR3 = compose_link(DEV_TNR2_EPS[2], DEV_TNR3_EPS[3])
LINK_TNR3_TNR4_ID, LINK_TNR3_TNR4 = compose_link(DEV_TNR3_EPS[2], DEV_TNR4_EPS[3])
LINK_TNR4_TNR1_ID, LINK_TNR4_TNR1 = compose_link(DEV_TNR4_EPS[2], DEV_TNR1_EPS[3])
LINK_TNR1_TNR3_ID, LINK_TNR1_TNR3 = compose_link(DEV_TNR1_EPS[4], DEV_TNR3_EPS[4])
LINK_TNR2_TNR4_ID, LINK_TNR2_TNR4 = compose_link(DEV_TNR2_EPS[4], DEV_TNR4_EPS[4])

# ----- Service --------------------------------------------------------------------------------------------------------
# Requested service: DC1 <-> DC2 between the 'int' endpoints, with bandwidth/latency constraints
SERVICE_DC1GW_DC2GW = compose_service(DEV_DC1GW_EPS[2], DEV_DC2GW_EPS[2], constraints=[
    json_constraint('bandwidth[gbps]', 10.0),
    json_constraint('latency[ms]', 12.0),
])

# ----- Containers -----------------------------------------------------------------------------------------------------
CONTEXTS = [ CONTEXT]
TOPOLOGIES = [ TOPO_ADMIN, TOPO_DC1, TOPO_DC2, TOPO_CS1, TOPO_CS2, TOPO_TN]
DEVICES = [ DEV_DC1GW, DEV_DC2GW,
            DEV_CS1GW1, DEV_CS1GW2, DEV_CS2GW1, DEV_CS2GW2,
            DEV_TNR1, DEV_TNR2, DEV_TNR3, DEV_TNR4 ]
LINKS = [ LINK_DC1GW_CS1GW1, LINK_DC1GW_CS1GW2, LINK_DC2GW_CS2GW1, LINK_DC2GW_CS2GW2,
          LINK_CS1GW1_TNR1, LINK_CS1GW2_TNR2, LINK_CS1GW1_TNR2, LINK_CS1GW2_TNR1,
          LINK_CS2GW1_TNR3, LINK_CS2GW2_TNR4, LINK_CS2GW1_TNR4, LINK_CS2GW2_TNR3,
          LINK_TNR1_TNR2, LINK_TNR2_TNR3, LINK_TNR3_TNR4, LINK_TNR4_TNR1, LINK_TNR1_TNR3, LINK_TNR2_TNR4 ]
SERVICES = [ SERVICE_DC1GW_DC2GW ]

# (topology_id, device_ids, link_ids) tuples describing topology membership
OBJECTS_PER_TOPOLOGY = [
    (TOPO_ADMIN_ID,
     [ DEV_DC1GW_ID, DEV_DC2GW_ID,
       DEV_CS1GW1_ID, DEV_CS1GW2_ID, DEV_CS2GW1_ID, DEV_CS2GW2_ID,
       DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID ],
     [ LINK_DC1GW_CS1GW1_ID, LINK_DC1GW_CS1GW2_ID, LINK_DC2GW_CS2GW1_ID, LINK_DC2GW_CS2GW2_ID,
       LINK_CS1GW1_TNR1_ID, LINK_CS1GW2_TNR2_ID, LINK_CS1GW1_TNR2_ID, LINK_CS1GW2_TNR1_ID,
       LINK_CS2GW1_TNR3_ID, LINK_CS2GW2_TNR4_ID, LINK_CS2GW1_TNR4_ID, LINK_CS2GW2_TNR3_ID,
       LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
       LINK_TNR2_TNR4_ID ],
    ),
    (TOPO_DC1_ID,
     [DEV_DC1GW_ID],
     []),
    (TOPO_DC2_ID,
     [DEV_DC2GW_ID],
     []),
    (TOPO_CS1_ID,
     [DEV_CS1GW1_ID, DEV_CS1GW2_ID],
     []),
    (TOPO_CS2_ID,
     [DEV_CS2GW1_ID, DEV_CS2GW2_ID],
     []),
    (TOPO_TN_ID,
     [DEV_TNR1_ID, DEV_TNR2_ID, DEV_TNR3_ID, DEV_TNR4_ID],
     [LINK_TNR1_TNR2_ID, LINK_TNR2_TNR3_ID, LINK_TNR3_TNR4_ID, LINK_TNR4_TNR1_ID, LINK_TNR1_TNR3_ID,
      LINK_TNR2_TNR4_ID]),
]
...@@ -24,7 +24,7 @@ from pathcomp.frontend.tests.MockService_Dependencies import MockService_Depende ...@@ -24,7 +24,7 @@ from pathcomp.frontend.tests.MockService_Dependencies import MockService_Depende
LOCAL_HOST = '127.0.0.1' LOCAL_HOST = '127.0.0.1'
MOCKSERVICE_PORT = 10000 MOCKSERVICE_PORT = 10000
PATHCOMP_SERVICE_PORT = MOCKSERVICE_PORT + get_service_port_grpc(ServiceNameEnum.PATHCOMP) # avoid privileged ports PATHCOMP_SERVICE_PORT = MOCKSERVICE_PORT + int(get_service_port_grpc(ServiceNameEnum.PATHCOMP)) # avoid privileged ports
os.environ[get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST) os.environ[get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_HOST )] = str(LOCAL_HOST)
os.environ[get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(PATHCOMP_SERVICE_PORT) os.environ[get_env_var_name(ServiceNameEnum.PATHCOMP, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(PATHCOMP_SERVICE_PORT)
......
...@@ -12,14 +12,30 @@ ...@@ -12,14 +12,30 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import logging import copy, logging, os
from common.proto.context_pb2 import Context, ContextId, DeviceId, Link, LinkId, Topology, Device, TopologyId from common.proto.context_pb2 import Context, ContextId, DeviceId, Link, LinkId, Topology, Device, TopologyId
from common.proto.pathcomp_pb2 import PathCompRequest from common.proto.pathcomp_pb2 import PathCompRequest
from common.tools.grpc.Tools import grpc_message_to_json from common.tools.grpc.Tools import grpc_message_to_json
from common.tools.object_factory.Constraint import json_constraint
from context.client.ContextClient import ContextClient from context.client.ContextClient import ContextClient
from device.client.DeviceClient import DeviceClient from device.client.DeviceClient import DeviceClient
from pathcomp.frontend.client.PathCompClient import PathCompClient from pathcomp.frontend.client.PathCompClient import PathCompClient
from .Objects import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
# Scenarios:
#from .Objects_A_B_C import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
from .Objects_DC_CSGW_TN import CONTEXTS, DEVICES, LINKS, OBJECTS_PER_TOPOLOGY, SERVICES, TOPOLOGIES
# configure backend environment variables before overwriting them with fixtures to use real backend pathcomp
DEFAULT_PATHCOMP_BACKEND_SCHEME = 'http'
DEFAULT_PATHCOMP_BACKEND_HOST = '127.0.0.1'
DEFAULT_PATHCOMP_BACKEND_PORT = '8081'
DEFAULT_PATHCOMP_BACKEND_BASEURL = '/pathComp/api/v1/compRoute'
os.environ['PATHCOMP_BACKEND_SCHEME'] = os.environ.get('PATHCOMP_BACKEND_SCHEME', DEFAULT_PATHCOMP_BACKEND_SCHEME)
os.environ['PATHCOMP_BACKEND_HOST'] = os.environ.get('PATHCOMPSERVICE_SERVICE_HOST', DEFAULT_PATHCOMP_BACKEND_HOST)
os.environ['PATHCOMP_BACKEND_PORT'] = os.environ.get('PATHCOMPSERVICE_SERVICE_PORT_HTTP', DEFAULT_PATHCOMP_BACKEND_PORT)
os.environ['PATHCOMP_BACKEND_BASEURL'] = os.environ.get('PATHCOMP_BACKEND_BASEURL', DEFAULT_PATHCOMP_BACKEND_BASEURL)
from .PrepareTestScenario import ( # pylint: disable=unused-import from .PrepareTestScenario import ( # pylint: disable=unused-import
# be careful, order of symbols is important here! # be careful, order of symbols is important here!
mock_service, pathcomp_service, context_client, device_client, pathcomp_client) mock_service, pathcomp_service, context_client, device_client, pathcomp_client)
...@@ -27,7 +43,6 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import ...@@ -27,7 +43,6 @@ from .PrepareTestScenario import ( # pylint: disable=unused-import
LOGGER = logging.getLogger(__name__) LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG) LOGGER.setLevel(logging.DEBUG)
def test_prepare_environment( def test_prepare_environment(
context_client : ContextClient, # pylint: disable=redefined-outer-name context_client : ContextClient, # pylint: disable=redefined-outer-name
device_client : DeviceClient): # pylint: disable=redefined-outer-name device_client : DeviceClient): # pylint: disable=redefined-outer-name
...@@ -55,7 +70,61 @@ def test_prepare_environment( ...@@ -55,7 +70,61 @@ def test_prepare_environment(
context_client.SetTopology(topology) context_client.SetTopology(topology)
def test_request_service_shortestpath(
    pathcomp_client : PathCompClient):  # pylint: disable=redefined-outer-name
    """Request path computation with the Shortest Path algorithm and validate
    that every requested service appears in the reply and has a connection.

    NOTE(review): the original header line was garbled by a diff merge
    (old and new signatures fused); restored to the new signature.
    """

    # Deep-copy the shared SERVICES fixture so the constraints added below do
    # not leak into the other test cases that reuse SERVICES.
    request_services = copy.deepcopy(SERVICES)
    request_services[0]['service_constraints'] = [
        json_constraint('bandwidth[gbps]', 1000.0),
        json_constraint('latency[ms]',     1200.0),
    ]
    pathcomp_request = PathCompRequest(services=request_services)
    pathcomp_request.shortest_path.Clear()  # hack to select the shortest path algorithm that has no attributes

    pathcomp_reply = pathcomp_client.Compute(pathcomp_request)

    pathcomp_reply = grpc_message_to_json(pathcomp_reply)
    reply_services = pathcomp_reply['services']
    reply_connections = pathcomp_reply['connections']
    # The reply may contain extra (sub-)services, but never fewer.
    assert len(request_services) <= len(reply_services)

    # Build 'context_uuid/service_uuid' keys to compare request vs reply.
    request_service_ids = {
        '{:s}/{:s}'.format(
            svc['service_id']['context_id']['context_uuid']['uuid'],
            svc['service_id']['service_uuid']['uuid']
        )
        for svc in request_services
    }
    reply_service_ids = {
        '{:s}/{:s}'.format(
            svc['service_id']['context_id']['context_uuid']['uuid'],
            svc['service_id']['service_uuid']['uuid']
        )
        for svc in reply_services
    }
    # Assert all requested services have a reply
    # It permits having other services not requested (i.e., sub-services)
    assert len(request_service_ids.difference(reply_service_ids)) == 0

    reply_connection_service_ids = {
        '{:s}/{:s}'.format(
            conn['service_id']['context_id']['context_uuid']['uuid'],
            conn['service_id']['service_uuid']['uuid']
        )
        for conn in reply_connections
    }
    # Assert all requested services have a connection associated
    # It permits having other connections not requested (i.e., connections for sub-services)
    assert len(request_service_ids.difference(reply_connection_service_ids)) == 0

    # TODO: implement other checks. examples:
    # - request service and reply service endpoints match
    # - request service and reply connection endpoints match
    # - reply sub-service and reply sub-connection endpoints match
    # - others?
    #for json_service,json_connection in zip(json_services, json_connections):
def test_request_service_kshortestpath(
pathcomp_client : PathCompClient): # pylint: disable=redefined-outer-name pathcomp_client : PathCompClient): # pylint: disable=redefined-outer-name
request_services = SERVICES request_services = SERVICES
...@@ -106,6 +175,56 @@ def test_request_service( ...@@ -106,6 +175,56 @@ def test_request_service(
#for json_service,json_connection in zip(json_services, json_connections): #for json_service,json_connection in zip(json_services, json_connections):
def test_request_service_kdisjointpath(
    pathcomp_client : PathCompClient):  # pylint: disable=redefined-outer-name
    """Request path computation with the K-Disjoint-Path algorithm asking for
    two disjoint paths, then check that every requested service is present in
    the reply and has at least one associated connection."""

    def service_key_set(entries):
        # Collect the 'context_uuid/service_uuid' identifiers of the given
        # service/connection JSON objects into a set for comparison.
        return {
            '{:s}/{:s}'.format(
                entry['service_id']['context_id']['context_uuid']['uuid'],
                entry['service_id']['service_uuid']['uuid'])
            for entry in entries
        }

    request_services = SERVICES
    pathcomp_request = PathCompRequest(services=request_services)
    pathcomp_request.k_disjoint_path.num_disjoint = 2  #pylint: disable=no-member

    pathcomp_reply = grpc_message_to_json(pathcomp_client.Compute(pathcomp_request))
    reply_services    = pathcomp_reply['services']
    reply_connections = pathcomp_reply['connections']

    # The reply may include extra (sub-)services, but never fewer entries
    # than requested.
    assert len(request_services) <= len(reply_services)

    requested_ids = service_key_set(request_services)

    # Every requested service must have a reply; additional services not
    # requested (i.e., sub-services) are permitted.
    assert not requested_ids.difference(service_key_set(reply_services))

    # Every requested service must have an associated connection; additional
    # connections (i.e., for sub-services) are permitted.
    assert not requested_ids.difference(service_key_set(reply_connections))

    # TODO: implement other checks. examples:
    # - request service and reply service endpoints match
    # - request service and reply connection endpoints match
    # - reply sub-service and reply sub-connection endpoints match
    # - others?
    #for json_service,json_connection in zip(json_services, json_connections):
def test_cleanup_environment( def test_cleanup_environment(
context_client : ContextClient, # pylint: disable=redefined-outer-name context_client : ContextClient, # pylint: disable=redefined-outer-name
device_client : DeviceClient): # pylint: disable=redefined-outer-name device_client : DeviceClient): # pylint: disable=redefined-outer-name
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment